VOL-2017 voltha-lib moved from voltha-go;
release version 2.2.1
Based on voltha-go commit 5259f8e52b3e3f5c7ad422a4b0e506e1d07f6b36
Change-Id: I8bbecdf456e420714a4016120eafc0d237c80565
diff --git a/pkg/common/grpc/security.go b/pkg/common/grpc/security.go
new file mode 100644
index 0000000..930d2c8
--- /dev/null
+++ b/pkg/common/grpc/security.go
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package grpc
+
+type GrpcSecurity struct {
+ KeyFile string
+ CertFile string
+ CaFile string
+}
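+
+/*
+Example (sketch): populating GrpcSecurity before creating a secure server. The file
+paths below are illustrative only.
+
+ certs := &GrpcSecurity{
+ KeyFile: "/etc/voltha/tls/server.key",
+ CertFile: "/etc/voltha/tls/server.crt",
+ CaFile: "/etc/voltha/tls/ca.crt",
+ }
+ s := NewGrpcServer("0.0.0.0", 50057, certs, true)
+*/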
diff --git a/pkg/common/grpc/server.go b/pkg/common/grpc/server.go
new file mode 100644
index 0000000..65a0e95
--- /dev/null
+++ b/pkg/common/grpc/server.go
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package grpc
+
+import (
+ "context"
+ "fmt"
+ "github.com/opencord/voltha-lib-go/pkg/common/log"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "net"
+)
+
+/*
+To add a GRPC server to your existing component, simply follow these steps:
+
+1. Create a server instance by passing the host and port where it should run and optionally add certificate information
+
+ e.g.
+ s.server = server.NewGrpcServer(s.config.GrpcHost, s.config.GrpcPort, nil, false)
+
+2. Create a function that will register your service with the GRPC server
+
+ e.g.
+ f := func(gs *grpc.Server) {
+ voltha.RegisterVolthaReadOnlyServiceServer(
+ gs,
+ core.NewReadOnlyServiceHandler(s.root),
+ )
+ }
+
+3. Add the service to the server
+
+ e.g.
+ s.server.AddService(f)
+
+4. Start the server
+
+ s.server.Start(ctx)
+*/
+
+type GrpcServer struct {
+ gs *grpc.Server
+ address string
+ port int
+ secure bool
+ services []func(*grpc.Server)
+
+ *GrpcSecurity
+}
+
+/*
+Instantiate a GRPC server data structure
+*/
+func NewGrpcServer(
+ address string,
+ port int,
+ certs *GrpcSecurity,
+ secure bool,
+) *GrpcServer {
+ server := &GrpcServer{
+ address: address,
+ port: port,
+ secure: secure,
+ GrpcSecurity: certs,
+ }
+ return server
+}
+
+/*
+Start prepares the GRPC server and starts servicing requests
+*/
+func (s *GrpcServer) Start(ctx context.Context) {
+
+ host := fmt.Sprintf("%s:%d", s.address, s.port)
+
+ lis, err := net.Listen("tcp", host)
+ if err != nil {
+ log.Fatalf("failed to listen: %v", err)
+ }
+
+ if s.secure && s.GrpcSecurity != nil {
+ creds, err := credentials.NewServerTLSFromFile(s.CertFile, s.KeyFile)
+ if err != nil {
+ log.Fatalf("could not load TLS keys: %s", err)
+ }
+ s.gs = grpc.NewServer(grpc.Creds(creds))
+
+ } else {
+ log.Info("starting-insecure-grpc-server")
+ s.gs = grpc.NewServer()
+ }
+
+ // Register all required services
+ for _, service := range s.services {
+ service(s.gs)
+ }
+
+ if err := s.gs.Serve(lis); err != nil {
+ log.Fatalf("failed to serve: %v\n", err)
+ }
+}
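+
+// Note that Start blocks in grpc.Server.Serve until the server stops, so callers
+// typically run it in a dedicated goroutine (e.g. go s.server.Start(ctx)); the ctx
+// argument is currently unused.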
+
+/*
+Stop servicing GRPC requests
+*/
+func (s *GrpcServer) Stop() {
+ if s.gs != nil {
+ s.gs.Stop()
+ }
+}
+
+/*
+AddService appends a generic service request function
+*/
+func (s *GrpcServer) AddService(
+ registerFunction func(*grpc.Server),
+) {
+ s.services = append(s.services, registerFunction)
+}
diff --git a/pkg/common/log/log.go b/pkg/common/log/log.go
new file mode 100644
index 0000000..fe3a4e0
--- /dev/null
+++ b/pkg/common/log/log.go
@@ -0,0 +1,763 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package log provides a structured Logger interface implemented using the zap logger. It provides the following capabilities:
+// 1. Package-level logging - a go package can register itself (AddPackage) and have a logger created for that package.
+// 2. Dynamic log level change - for all registered packages (SetAllLogLevel)
+// 3. Dynamic log level change - for a given package (SetPackageLogLevel)
+// 4. Provides a default logger for unregistered packages
+// 5. Allows key-value pairs to be added to a logger (UpdateLogger) or to all loggers (UpdateAllLoggers) at run time
+// 6. Adds to the log output the location where the log was invoked (filename.functionname.linenumber)
+//
+// Using package-level logging (recommended approach). In the examples below, log refers to this log package.
+//
+// 1. In the appropriate package, add the following to the init section of the package. The log level can be changed
+// and any number of default fields can be added as well. The log level specifies the lowest log level that will be
+// in the output while the fields will be automatically added to all log printouts.
+//
+// log.AddPackage(log.JSON, log.WarnLevel, log.Fields{"anyFieldName": "any value"})
+//
+// 2. In the calling package, just invoke any of the publicly available functions of the logger. Here is an example
+// to write an Info log with additional fields:
+//
+// log.Infow("An example", log.Fields{"myStringOutput": "output", "myIntOutput": 2})
+//
+// 3. To dynamically change the log level, you can use 1) SetLogLevel from inside your package, 2) SetPackageLogLevel
+// from anywhere, or 3) SetAllLogLevel from anywhere.
+//
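+// For example (sketch; the package name shown is illustrative), debug logging can be enabled
+// for a single registered package at run time:
+//
+// log.SetPackageLogLevel("github.com/opencord/voltha-go/rw_core/core", log.DebugLevel)
+//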
+
+package log
+
+import (
+ "errors"
+ "fmt"
+ zp "go.uber.org/zap"
+ zc "go.uber.org/zap/zapcore"
+ "path"
+ "runtime"
+ "strings"
+)
+
+const (
+ // DebugLevel logs a message at debug level
+ DebugLevel = iota
+ // InfoLevel logs a message at info level
+ InfoLevel
+ // WarnLevel logs a message at warning level
+ WarnLevel
+ // ErrorLevel logs a message at error level
+ ErrorLevel
+ // PanicLevel logs a message, then panics.
+ PanicLevel
+ // FatalLevel logs a message, then calls os.Exit(1).
+ FatalLevel
+)
+
+// CONSOLE formats the log for the console, mostly used during development
+const CONSOLE = "console"
+
+// JSON formats the log using json format, mostly used for consumption by automated logging systems
+const JSON = "json"
+
+// Logger represents an abstract logging interface. Any logging implementation used
+// will need to abide by this interface
+type Logger interface {
+ Debug(...interface{})
+ Debugln(...interface{})
+ Debugf(string, ...interface{})
+ Debugw(string, Fields)
+
+ Info(...interface{})
+ Infoln(...interface{})
+ Infof(string, ...interface{})
+ Infow(string, Fields)
+
+ Warn(...interface{})
+ Warnln(...interface{})
+ Warnf(string, ...interface{})
+ Warnw(string, Fields)
+
+ Error(...interface{})
+ Errorln(...interface{})
+ Errorf(string, ...interface{})
+ Errorw(string, Fields)
+
+ Fatal(...interface{})
+ Fatalln(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalw(string, Fields)
+
+ With(Fields) Logger
+
+ // The following are added to be able to use this logger as a gRPC LoggerV2 if needed
+ //
+ Warning(...interface{})
+ Warningln(...interface{})
+ Warningf(string, ...interface{})
+
+ // V reports whether verbosity level l is at least the requested verbose level.
+ V(l int) bool
+}
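+
+// Because the interface also provides the V/Warning variants, a logger returned by this
+// package can be plugged into gRPC directly (sketch; uses the standard grpclog API):
+//
+// logger, _ := log.AddPackage(log.JSON, log.ErrorLevel, nil)
+// grpclog.SetLoggerV2(logger)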
+
+// Fields is used as key-value pairs for structured logging
+type Fields map[string]interface{}
+
+var defaultLogger *logger
+var cfg zp.Config
+
+var loggers map[string]*logger
+var cfgs map[string]zp.Config
+
+type logger struct {
+ log *zp.SugaredLogger
+ parent *zp.Logger
+}
+
+func intToAtomicLevel(l int) zp.AtomicLevel {
+ switch l {
+ case DebugLevel:
+ return zp.NewAtomicLevelAt(zc.DebugLevel)
+ case InfoLevel:
+ return zp.NewAtomicLevelAt(zc.InfoLevel)
+ case WarnLevel:
+ return zp.NewAtomicLevelAt(zc.WarnLevel)
+ case ErrorLevel:
+ return zp.NewAtomicLevelAt(zc.ErrorLevel)
+ case PanicLevel:
+ return zp.NewAtomicLevelAt(zc.PanicLevel)
+ case FatalLevel:
+ return zp.NewAtomicLevelAt(zc.FatalLevel)
+ }
+ return zp.NewAtomicLevelAt(zc.ErrorLevel)
+}
+
+func intToLevel(l int) zc.Level {
+ switch l {
+ case DebugLevel:
+ return zc.DebugLevel
+ case InfoLevel:
+ return zc.InfoLevel
+ case WarnLevel:
+ return zc.WarnLevel
+ case ErrorLevel:
+ return zc.ErrorLevel
+ case PanicLevel:
+ return zc.PanicLevel
+ case FatalLevel:
+ return zc.FatalLevel
+ }
+ return zc.ErrorLevel
+}
+
+func levelToInt(l zc.Level) int {
+ switch l {
+ case zc.DebugLevel:
+ return DebugLevel
+ case zc.InfoLevel:
+ return InfoLevel
+ case zc.WarnLevel:
+ return WarnLevel
+ case zc.ErrorLevel:
+ return ErrorLevel
+ case zc.PanicLevel:
+ return PanicLevel
+ case zc.FatalLevel:
+ return FatalLevel
+ }
+ return ErrorLevel
+}
+
+func getDefaultConfig(outputType string, level int, defaultFields Fields) zp.Config {
+ return zp.Config{
+ Level: intToAtomicLevel(level),
+ Encoding: outputType,
+ Development: true,
+ OutputPaths: []string{"stdout"},
+ ErrorOutputPaths: []string{"stderr"},
+ InitialFields: defaultFields,
+ EncoderConfig: zc.EncoderConfig{
+ LevelKey: "level",
+ MessageKey: "msg",
+ TimeKey: "ts",
+ StacktraceKey: "stacktrace",
+ LineEnding: zc.DefaultLineEnding,
+ EncodeLevel: zc.LowercaseLevelEncoder,
+ EncodeTime: zc.ISO8601TimeEncoder,
+ EncodeDuration: zc.SecondsDurationEncoder,
+ EncodeCaller: zc.ShortCallerEncoder,
+ },
+ }
+}
+
+// SetDefaultLogger needs to be invoked before the logger API can be invoked. This function
+// initializes the default logger (zap's SugaredLogger)
+func SetDefaultLogger(outputType string, level int, defaultFields Fields) (Logger, error) {
+ // Build a custom config using zap
+ cfg = getDefaultConfig(outputType, level, defaultFields)
+
+ l, err := cfg.Build()
+ if err != nil {
+ return nil, err
+ }
+
+ defaultLogger = &logger{
+ log: l.Sugar(),
+ parent: l,
+ }
+
+ return defaultLogger, nil
+}
+
+// AddPackage registers a package to the log map. Each package gets its own logger which allows
+// its config (loglevel) to be changed dynamically without interacting with the other packages.
+// outputType is JSON or CONSOLE, level is the lowest level log to output with this logger and defaultFields is a map of
+// key-value pairs to always add to the output.
+// Note: AddPackage also returns a reference to the actual logger. If a calling package uses this reference directly
+// instead of using the publicly available functions in this log package then a number of functionalities will not
+// be available to it, notably log tracing with filename.functionname.linenumber annotation.
+//
+// The pkgNames parameter should be used for testing only as this function detects the caller's package.
+func AddPackage(outputType string, level int, defaultFields Fields, pkgNames ...string) (Logger, error) {
+ if cfgs == nil {
+ cfgs = make(map[string]zp.Config)
+ }
+ if loggers == nil {
+ loggers = make(map[string]*logger)
+ }
+
+ var pkgName string
+ for _, name := range pkgNames {
+ pkgName = name
+ break
+ }
+ if pkgName == "" {
+ pkgName, _, _, _ = getCallerInfo()
+ }
+
+ if _, exist := loggers[pkgName]; exist {
+ return loggers[pkgName], nil
+ }
+
+ cfgs[pkgName] = getDefaultConfig(outputType, level, defaultFields)
+
+ l, err := cfgs[pkgName].Build()
+ if err != nil {
+ return nil, err
+ }
+
+ loggers[pkgName] = &logger{
+ log: l.Sugar(),
+ parent: l,
+ }
+ return loggers[pkgName], nil
+}
+
+// UpdateAllLoggers creates new loggers for all registered packages with the defaultFields.
+func UpdateAllLoggers(defaultFields Fields) error {
+ for pkgName, cfg := range cfgs {
+ for k, v := range defaultFields {
+ if cfg.InitialFields == nil {
+ cfg.InitialFields = make(map[string]interface{})
+ }
+ cfg.InitialFields[k] = v
+ }
+ l, err := cfg.Build()
+ if err != nil {
+ return err
+ }
+
+ loggers[pkgName] = &logger{
+ log: l.Sugar(),
+ parent: l,
+ }
+ }
+ return nil
+}
+
+// GetPackageNames returns a list of all packages that have individually-configured loggers
+func GetPackageNames() []string {
+ i := 0
+ keys := make([]string, len(loggers))
+ for k := range loggers {
+ keys[i] = k
+ i++
+ }
+ return keys
+}
+
+// UpdateLogger replaces the logger associated with the caller's package with a new logger built with the
+// defaultFields. If a calling package is holding on to a Logger reference obtained from an AddPackage
+// invocation, it needs to invoke UpdateLogger to change the default fields and obtain a new logger
+// reference
+func UpdateLogger(defaultFields Fields) (Logger, error) {
+ pkgName, _, _, _ := getCallerInfo()
+ if _, exist := loggers[pkgName]; !exist {
+ return nil, errors.New(fmt.Sprintf("package-%s-not-registered", pkgName))
+ }
+
+ // Build a new logger
+ if _, exist := cfgs[pkgName]; !exist {
+ return nil, errors.New(fmt.Sprintf("config-%s-not-registered", pkgName))
+ }
+
+ cfg := cfgs[pkgName]
+ for k, v := range defaultFields {
+ if cfg.InitialFields == nil {
+ cfg.InitialFields = make(map[string]interface{})
+ }
+ cfg.InitialFields[k] = v
+ }
+ l, err := cfg.Build()
+ if err != nil {
+ return nil, err
+ }
+
+ // Set the logger
+ loggers[pkgName] = &logger{
+ log: l.Sugar(),
+ parent: l,
+ }
+ return loggers[pkgName], nil
+}
+
+func setLevel(cfg zp.Config, level int) {
+ switch level {
+ case DebugLevel:
+ cfg.Level.SetLevel(zc.DebugLevel)
+ case InfoLevel:
+ cfg.Level.SetLevel(zc.InfoLevel)
+ case WarnLevel:
+ cfg.Level.SetLevel(zc.WarnLevel)
+ case ErrorLevel:
+ cfg.Level.SetLevel(zc.ErrorLevel)
+ case PanicLevel:
+ cfg.Level.SetLevel(zc.PanicLevel)
+ case FatalLevel:
+ cfg.Level.SetLevel(zc.FatalLevel)
+ default:
+ cfg.Level.SetLevel(zc.ErrorLevel)
+ }
+}
+
+// SetPackageLogLevel dynamically sets the log level of a given package to level. This is typically invoked at an
+// application level during debugging
+func SetPackageLogLevel(packageName string, level int) {
+ // Get proper config
+ if cfg, ok := cfgs[packageName]; ok {
+ setLevel(cfg, level)
+ }
+}
+
+// SetAllLogLevel sets the log level of all registered packages to level
+func SetAllLogLevel(level int) {
+ // Get proper config
+ for _, cfg := range cfgs {
+ setLevel(cfg, level)
+ }
+}
+
+// GetPackageLogLevel returns the current log level of a package.
+func GetPackageLogLevel(packageName ...string) (int, error) {
+ var name string
+ if len(packageName) == 1 {
+ name = packageName[0]
+ } else {
+ name, _, _, _ = getCallerInfo()
+ }
+ if cfg, ok := cfgs[name]; ok {
+ return levelToInt(cfg.Level.Level()), nil
+ }
+ return 0, errors.New(fmt.Sprintf("unknown-package-%s", name))
+}
+
+// GetDefaultLogLevel gets the log level used for packages that don't have specific loggers
+func GetDefaultLogLevel() int {
+ return levelToInt(cfg.Level.Level())
+}
+
+// SetLogLevel sets the log level for the logger corresponding to the caller's package
+func SetLogLevel(level int) error {
+ pkgName, _, _, _ := getCallerInfo()
+ if _, exist := cfgs[pkgName]; !exist {
+ return errors.New(fmt.Sprintf("unregistered-package-%s", pkgName))
+ }
+ cfg := cfgs[pkgName]
+ setLevel(cfg, level)
+ return nil
+}
+
+// SetDefaultLogLevel sets the log level used for packages that don't have specific loggers
+func SetDefaultLogLevel(level int) {
+ setLevel(cfg, level)
+}
+
+// CleanUp flushes any buffered log entries. Applications should take care to call
+// CleanUp before exiting.
+func CleanUp() error {
+ for _, logger := range loggers {
+ if logger != nil {
+ if logger.parent != nil {
+ if err := logger.parent.Sync(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ if defaultLogger != nil {
+ if defaultLogger.parent != nil {
+ if err := defaultLogger.parent.Sync(); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func getCallerInfo() (string, string, string, int) {
+ // The caller of a log function sits one stack frame above the last log.go frame, so walk the
+ // frames until the last log.go frame is passed and grab the caller info one level higher.
+ maxLevel := 3
+ skiplevel := 3 // Skip count that empirically lands on the last log.go stack frame.
+ pc := make([]uintptr, maxLevel)
+ n := runtime.Callers(skiplevel, pc)
+ packageName := ""
+ funcName := ""
+ fileName := ""
+ var line int
+ if n == 0 {
+ return packageName, fileName, funcName, line
+ }
+ frames := runtime.CallersFrames(pc[:n])
+ var frame runtime.Frame
+ var foundFrame runtime.Frame
+ more := true
+ for more {
+ frame, more = frames.Next()
+ _, fileName = path.Split(frame.File)
+ if fileName != "log.go" {
+ foundFrame = frame // First frame after log.go in the frame stack
+ break
+ }
+ }
+ parts := strings.Split(foundFrame.Function, ".")
+ pl := len(parts)
+ if pl >= 2 {
+ funcName = parts[pl-1]
+ if parts[pl-2][0] == '(' {
+ packageName = strings.Join(parts[0:pl-2], ".")
+ } else {
+ packageName = strings.Join(parts[0:pl-1], ".")
+ }
+ }
+
+ if strings.HasSuffix(packageName, ".init") {
+ packageName = strings.TrimSuffix(packageName, ".init")
+ }
+
+ if strings.HasSuffix(fileName, ".go") {
+ fileName = strings.TrimSuffix(fileName, ".go")
+ }
+
+ return packageName, fileName, funcName, foundFrame.Line
+}
+
+func getPackageLevelSugaredLogger() *zp.SugaredLogger {
+ pkgName, fileName, funcName, line := getCallerInfo()
+ if _, exist := loggers[pkgName]; exist {
+ return loggers[pkgName].log.With("caller", fmt.Sprintf("%s.%s:%d", fileName, funcName, line))
+ }
+ return defaultLogger.log.With("caller", fmt.Sprintf("%s.%s:%d", fileName, funcName, line))
+}
+
+func getPackageLevelLogger() Logger {
+ pkgName, _, _, _ := getCallerInfo()
+ if _, exist := loggers[pkgName]; exist {
+ return loggers[pkgName]
+ }
+ return defaultLogger
+}
+
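+// serializeMap flattens Fields into the alternating key/value slice that zap's sugared
+// logger expects, e.g. Fields{"device": "olt-1", "port": 2} becomes
+// []interface{}{"device", "olt-1", "port", 2} (map iteration order is not guaranteed).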
+func serializeMap(fields Fields) []interface{} {
+ data := make([]interface{}, len(fields)*2)
+ i := 0
+ for k, v := range fields {
+ data[i] = k
+ data[i+1] = v
+ i = i + 2
+ }
+ return data
+}
+
+// With returns a logger initialized with the key-value pairs
+func (l logger) With(keysAndValues Fields) Logger {
+ return logger{log: l.log.With(serializeMap(keysAndValues)...), parent: l.parent}
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func (l logger) Debug(args ...interface{}) {
+ l.log.Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger. A line feed is appended by default, so it behaves the same as Debug.
+func (l logger) Debugln(args ...interface{}) {
+ l.log.Debug(args...)
+}
+
+// Debugf logs a formatted message at level Debug on the standard logger.
+func (l logger) Debugf(format string, args ...interface{}) {
+ l.log.Debugf(format, args...)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Debugw(msg string, keysAndValues Fields) {
+ l.log.Debugw(msg, serializeMap(keysAndValues)...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func (l logger) Info(args ...interface{}) {
+ l.log.Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger. A line feed is appended by default, so it behaves the same as Info.
+func (l logger) Infoln(args ...interface{}) {
+ l.log.Info(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func (l logger) Infof(format string, args ...interface{}) {
+ l.log.Infof(format, args...)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Infow(msg string, keysAndValues Fields) {
+ l.log.Infow(msg, serializeMap(keysAndValues)...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func (l logger) Warn(args ...interface{}) {
+ l.log.Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger. A line feed is appended by default, so it behaves the same as Warn.
+func (l logger) Warnln(args ...interface{}) {
+ l.log.Warn(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func (l logger) Warnf(format string, args ...interface{}) {
+ l.log.Warnf(format, args...)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Warnw(msg string, keysAndValues Fields) {
+ l.log.Warnw(msg, serializeMap(keysAndValues)...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func (l logger) Error(args ...interface{}) {
+ l.log.Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger. A line feed is appended by default, so it behaves the same as Error.
+func (l logger) Errorln(args ...interface{}) {
+ l.log.Error(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func (l logger) Errorf(format string, args ...interface{}) {
+ l.log.Errorf(format, args...)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Errorw(msg string, keysAndValues Fields) {
+ l.log.Errorw(msg, serializeMap(keysAndValues)...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func (l logger) Fatal(args ...interface{}) {
+ l.log.Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger. A line feed is appended by default, so it behaves the same as Fatal.
+func (l logger) Fatalln(args ...interface{}) {
+ l.log.Fatal(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func (l logger) Fatalf(format string, args ...interface{}) {
+ l.log.Fatalf(format, args...)
+}
+
+// Fatalw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Fatalw(msg string, keysAndValues Fields) {
+ l.log.Fatalw(msg, serializeMap(keysAndValues)...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func (l logger) Warning(args ...interface{}) {
+ l.log.Warn(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger. A line feed is appended by default, so it behaves the same as Warning.
+func (l logger) Warningln(args ...interface{}) {
+ l.log.Warn(args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func (l logger) Warningf(format string, args ...interface{}) {
+ l.log.Warnf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (l logger) V(level int) bool {
+ return l.parent.Core().Enabled(intToLevel(level))
+}
+
+// With returns a logger initialized with the key-value pairs
+func With(keysAndValues Fields) Logger {
+ return logger{log: getPackageLevelSugaredLogger().With(serializeMap(keysAndValues)...), parent: defaultLogger.parent}
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ getPackageLevelSugaredLogger().Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ getPackageLevelSugaredLogger().Debug(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ getPackageLevelSugaredLogger().Debugf(format, args...)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Debugw(msg string, keysAndValues Fields) {
+ getPackageLevelSugaredLogger().Debugw(msg, serializeMap(keysAndValues)...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ getPackageLevelSugaredLogger().Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ getPackageLevelSugaredLogger().Info(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ getPackageLevelSugaredLogger().Infof(format, args...)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Infow(msg string, keysAndValues Fields) {
+ getPackageLevelSugaredLogger().Infow(msg, serializeMap(keysAndValues)...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ getPackageLevelSugaredLogger().Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ getPackageLevelSugaredLogger().Warn(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ getPackageLevelSugaredLogger().Warnf(format, args...)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Warnw(msg string, keysAndValues Fields) {
+ getPackageLevelSugaredLogger().Warnw(msg, serializeMap(keysAndValues)...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ getPackageLevelSugaredLogger().Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ getPackageLevelSugaredLogger().Error(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ getPackageLevelSugaredLogger().Errorf(format, args...)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Errorw(msg string, keysAndValues Fields) {
+ getPackageLevelSugaredLogger().Errorw(msg, serializeMap(keysAndValues)...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ getPackageLevelSugaredLogger().Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ getPackageLevelSugaredLogger().Fatal(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ getPackageLevelSugaredLogger().Fatalf(format, args...)
+}
+
+// Fatalw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Fatalw(msg string, keysAndValues Fields) {
+ getPackageLevelSugaredLogger().Fatalw(msg, serializeMap(keysAndValues)...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ getPackageLevelSugaredLogger().Warn(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ getPackageLevelSugaredLogger().Warn(args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ getPackageLevelSugaredLogger().Warnf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func V(level int) bool {
+ return getPackageLevelLogger().V(level)
+}
diff --git a/pkg/common/log/log_test.go b/pkg/common/log/log_test.go
new file mode 100644
index 0000000..88794b2
--- /dev/null
+++ b/pkg/common/log/log_test.go
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package log
+
+import (
+ "github.com/stretchr/testify/assert"
+ "google.golang.org/grpc/grpclog"
+ "testing"
+)
+
+
+var testLogger Logger
+
+func TestInit(t *testing.T) {
+ var err error
+ testLogger, err = AddPackage(JSON, ErrorLevel, nil)
+ assert.NotNil(t, testLogger)
+ assert.Nil(t, err)
+}
+
+func verifyLogLevel(t *testing.T, minimumLevel int) {
+ SetAllLogLevel(minimumLevel)
+ var success bool
+ for i := 0; i < 6; i++ {
+ success = testLogger.V(i)
+ if i == 1 && minimumLevel == 2 {
+ // TODO: Update the test when a new version of Zap logger is available. It has a bug with that
+ // specific combination
+ continue
+ }
+ if i < minimumLevel {
+ assert.False(t, success)
+ } else {
+ assert.True(t, success)
+ }
+ }
+}
+
+func TestLogLevelDebug(t *testing.T) {
+ for i := 0; i < 6; i++ {
+ verifyLogLevel(t, i)
+ }
+}
+
+func TestUpdateAllLoggers(t *testing.T) {
+ err := UpdateAllLoggers(Fields{"update": "update"})
+ assert.Nil(t, err)
+}
+
+func TestUpdateLoggers(t *testing.T) {
+ testLogger, err := UpdateLogger(Fields{"update": "update"})
+ assert.Nil(t, err)
+ assert.NotNil(t, testLogger)
+}
+
+func TestUseAsGrpcLoggerV2(t *testing.T) {
+ var grpcLogger grpclog.LoggerV2
+ thisLogger, _ := AddPackage(JSON, ErrorLevel, nil)
+ grpcLogger = thisLogger
+ assert.NotNil(t, grpcLogger)
+}
+
+func TestUpdateLogLevel(t *testing.T) {
+ // Let's create a bunch of loggers, each with a separate package
+ myLoggers := make(map[string]Logger)
+ pkgNames := []string{"/rw_core/core", "/db/model", "/kafka"}
+ for _, name := range pkgNames {
+ myLoggers[name], _ = AddPackage(JSON, ErrorLevel, nil, name)
+ }
+ //Test updates to log levels
+ levels := []int{0, 1, 2, 3, 4, 5}
+ for _, expectedLevel := range levels {
+ for _, name := range pkgNames {
+ SetPackageLogLevel(name, expectedLevel)
+ l, err := GetPackageLogLevel(name)
+ assert.Nil(t, err)
+ assert.Equal(t, l, expectedLevel)
+ }
+ }
+ //Test set all package level
+ for _, expectedLevel := range levels {
+ SetAllLogLevel(expectedLevel)
+ for _, name := range pkgNames {
+ l, err := GetPackageLogLevel(name)
+ assert.Nil(t, err)
+ assert.Equal(t, l, expectedLevel)
+ }
+ }
+}
diff --git a/pkg/common/ponresourcemanager/ponresourcemanager.go b/pkg/common/ponresourcemanager/ponresourcemanager.go
new file mode 100755
index 0000000..69156ae
--- /dev/null
+++ b/pkg/common/ponresourcemanager/ponresourcemanager.go
@@ -0,0 +1,1180 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ponresourcemanager
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+
+ bitmap "github.com/boljen/go-bitmap"
+ "github.com/opencord/voltha-lib-go/pkg/common/log"
+ tp "github.com/opencord/voltha-lib-go/pkg/common/techprofile"
+ "github.com/opencord/voltha-lib-go/pkg/db/kvstore"
+ "github.com/opencord/voltha-lib-go/pkg/db/model"
+)
+
+const (
+ //Constants to identify resource pool
+ UNI_ID = "UNI_ID"
+ ONU_ID = "ONU_ID"
+ ALLOC_ID = "ALLOC_ID"
+ GEMPORT_ID = "GEMPORT_ID"
+ FLOW_ID = "FLOW_ID"
+
+ // Constants for passing command line arguments
+ OLT_MODEL_ARG = "--olt_model"
+ PATH_PREFIX = "service/voltha/resource_manager/{%s}"
+ /*The resource ranges for a given device model should be placed
+ at 'resource_manager/<technology>/resource_ranges/<olt_model_type>'
+ path on the KV store.
+ If Resource Range parameters are to be read from the external KV store,
+ they are expected to be stored in the following format.
+ Note: All parameters are MANDATORY for now.
+ The constants below are used as keys to reference the resource range parameters
+ in an external KV store.
+ */
+ UNI_ID_START_IDX = "uni_id_start"
+ UNI_ID_END_IDX = "uni_id_end"
+ ONU_ID_START_IDX = "onu_id_start"
+ ONU_ID_END_IDX = "onu_id_end"
+ ONU_ID_SHARED_IDX = "onu_id_shared"
+ ALLOC_ID_START_IDX = "alloc_id_start"
+ ALLOC_ID_END_IDX = "alloc_id_end"
+ ALLOC_ID_SHARED_IDX = "alloc_id_shared"
+ GEMPORT_ID_START_IDX = "gemport_id_start"
+ GEMPORT_ID_END_IDX = "gemport_id_end"
+ GEMPORT_ID_SHARED_IDX = "gemport_id_shared"
+ FLOW_ID_START_IDX = "flow_id_start"
+ FLOW_ID_END_IDX = "flow_id_end"
+ FLOW_ID_SHARED_IDX = "flow_id_shared"
+ NUM_OF_PON_PORT = "pon_ports"
+
+ /*
+ The KV store backend is initialized with a path prefix and we need to
+ provide only the suffix.
+ */
+ PON_RESOURCE_RANGE_CONFIG_PATH = "resource_ranges/%s"
+
+ //resource path suffix
+ //Path on the KV store for storing alloc id ranges and resource pool for a given interface
+ //Format: <device_id>/alloc_id_pool/<pon_intf_id>
+ ALLOC_ID_POOL_PATH = "{%s}/alloc_id_pool/{%d}"
+ //Path on the KV store for storing gemport id ranges and resource pool for a given interface
+ //Format: <device_id>/gemport_id_pool/<pon_intf_id>
+ GEMPORT_ID_POOL_PATH = "{%s}/gemport_id_pool/{%d}"
+ //Path on the KV store for storing onu id ranges and resource pool for a given interface
+ //Format: <device_id>/onu_id_pool/<pon_intf_id>
+ ONU_ID_POOL_PATH = "{%s}/onu_id_pool/{%d}"
+ //Path on the KV store for storing flow id ranges and resource pool for a given interface
+ //Format: <device_id>/flow_id_pool/<pon_intf_id>
+ FLOW_ID_POOL_PATH = "{%s}/flow_id_pool/{%d}"
+
+ //Path on the KV store for storing list of alloc IDs for a given ONU
+ //Format: <device_id>/<(pon_intf_id, onu_id)>/alloc_ids
+ ALLOC_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/alloc_ids"
+
+ //Path on the KV store for storing list of gemport IDs for a given ONU
+ //Format: <device_id>/<(pon_intf_id, onu_id)>/gemport_ids
+ GEMPORT_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/gemport_ids"
+
+ //Path on the KV store for storing list of Flow IDs for a given ONU
+ //Format: <device_id>/<(pon_intf_id, onu_id)>/flow_ids
+ FLOW_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/flow_ids"
+
+ //Flow Id info: Use to store more metadata associated with the flow_id
+ //Format: <device_id>/<(pon_intf_id, onu_id)>/flow_id_info/<flow_id>
+ FLOW_ID_INFO_PATH = "{%s}/{%s}/flow_id_info/{%d}"
+
+ //Constants for internal usage.
+ PON_INTF_ID = "pon_intf_id"
+ START_IDX = "start_idx"
+ END_IDX = "end_idx"
+ POOL = "pool"
+ NUM_OF_PON_INTF = 16
+
+ KVSTORE_RETRY_TIMEOUT = 5
+)
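+
+// As an illustration, for technology "xgspon", device id "0001deadbeef" and PON port 0,
+// the onu id pool would be stored under
+// "service/voltha/resource_manager/{xgspon}/{0001deadbeef}/onu_id_pool/{0}"
+// (the backend prefix built from PATH_PREFIX joined with ONU_ID_POOL_PATH).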
+
+type PONResourceManager struct {
+ // Implements APIs to initialize/allocate/release alloc/gemport/onu/flow IDs.
+ Technology string
+ DeviceType string
+ DeviceID string
+ Backend string // ETCD, or consul
+ Host string // host ip of the KV store
+ Port int // port number for the KV store
+ OLTModel string
+ KVStore *model.Backend
+ TechProfileMgr tp.TechProfileIf // create object of *tp.TechProfileMgr
+
+ // The attribute below, PonResourceRanges, should be initialized
+ // by reading from the KV store.
+ PonResourceRanges map[string]interface{}
+ SharedResourceMgrs map[string]*PONResourceManager
+ SharedIdxByType map[string]string
+ IntfIDs []uint32 // list of pon interface IDs
+ Globalorlocal string
+}
+
+func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
+ log.Infow("kv-store-type", log.Fields{"store": storeType})
+ switch storeType {
+ case "consul":
+ return kvstore.NewConsulClient(address, timeout)
+ case "etcd":
+ return kvstore.NewEtcdClient(address, timeout)
+ }
+ return nil, errors.New("unsupported-kv-store")
+}
+
+func SetKVClient(Technology string, Backend string, Host string, Port int) *model.Backend {
+ addr := Host + ":" + strconv.Itoa(Port)
+ // TODO: Make sure a direct call to NewBackend works fine with the backend; currently there is some
+ // issue between the kv store and the backend, and the core is not calling NewBackend directly
+ kvClient, err := newKVClient(Backend, addr, KVSTORE_RETRY_TIMEOUT)
+ if err != nil {
+ log.Fatalw("Failed to init KV client", log.Fields{"err": err})
+ return nil
+ }
+ kvbackend := &model.Backend{
+ Client: kvClient,
+ StoreType: Backend,
+ Host: Host,
+ Port: Port,
+ Timeout: KVSTORE_RETRY_TIMEOUT,
+ PathPrefix: fmt.Sprintf(PATH_PREFIX, Technology)}
+
+ return kvbackend
+}
+
+// NewPONResourceManager creates a new PON resource manager.
+func NewPONResourceManager(Technology string, DeviceType string, DeviceID string, Backend string, Host string, Port int) (*PONResourceManager, error) {
+ var PONMgr PONResourceManager
+ PONMgr.Technology = Technology
+ PONMgr.DeviceType = DeviceType
+ PONMgr.DeviceID = DeviceID
+ PONMgr.Backend = Backend
+ PONMgr.Host = Host
+ PONMgr.Port = Port
+ PONMgr.KVStore = SetKVClient(Technology, Backend, Host, Port)
+ if PONMgr.KVStore == nil {
+ log.Error("KV Client initilization failed")
+ return nil, errors.New("Failed to init KV client")
+ }
+ // Initialize techprofile for this technology
+ if PONMgr.TechProfileMgr, _ = tp.NewTechProfile(&PONMgr, Backend, Host, Port); PONMgr.TechProfileMgr == nil {
+ log.Error("Techprofile initialization failed")
+ return nil, errors.New("Failed to init tech profile")
+ }
+ PONMgr.PonResourceRanges = make(map[string]interface{})
+ PONMgr.SharedResourceMgrs = make(map[string]*PONResourceManager)
+ PONMgr.SharedIdxByType = make(map[string]string)
+ PONMgr.SharedIdxByType[ONU_ID] = ONU_ID_SHARED_IDX
+ PONMgr.SharedIdxByType[ALLOC_ID] = ALLOC_ID_SHARED_IDX
+ PONMgr.SharedIdxByType[GEMPORT_ID] = GEMPORT_ID_SHARED_IDX
+ PONMgr.SharedIdxByType[FLOW_ID] = FLOW_ID_SHARED_IDX
+ PONMgr.IntfIDs = make([]uint32, NUM_OF_PON_INTF)
+ PONMgr.OLTModel = DeviceType
+ return &PONMgr, nil
+}
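+
+/*
+A typical bring-up (sketch; the endpoint and ids below are illustrative) creates the
+manager, seeds the resource ranges, and then initializes the per-port pools:
+
+ ponRMgr, err := NewPONResourceManager("xgspon", "default_olt", "0001deadbeef", "etcd", "127.0.0.1", 2379)
+ if err != nil {
+ return err
+ }
+ ponRMgr.InitResourceRangesFromKVStore() // optional per-OLT-model overrides from the KV store
+ // ... seed defaults via InitDefaultPONResourceRanges (see the example further below) ...
+ if err := ponRMgr.InitDeviceResourcePool(); err != nil {
+ return err
+ }
+*/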
+
+/*
+ Initialize PON resource ranges with config fetched from kv store.
+ return boolean: True if PON resource ranges initialized else false
+ Try to initialize the PON Resource Ranges from KV store based on the
+ OLT model key, if available
+*/
+
+func (PONRMgr *PONResourceManager) InitResourceRangesFromKVStore() bool {
+ if PONRMgr.OLTModel == "" {
+ log.Error("Failed to get OLT model")
+ return false
+ }
+ Path := fmt.Sprintf(PON_RESOURCE_RANGE_CONFIG_PATH, PONRMgr.OLTModel)
+ //get resource from kv store
+ Result, err := PONRMgr.KVStore.Get(Path)
+ if err != nil {
+ log.Debugf("Error in fetching resource %s from KV strore", Path)
+ return false
+ }
+ if Result == nil {
+ log.Debug("There may be no resources in the KV store in case of fresh bootup, return true")
+ return false
+ }
+ //update internal ranges from kv ranges. If there are missing
+ // values in the KV profile, continue to use the defaults
+ Value, err := ToByte(Result.Value)
+ if err != nil {
+ log.Error("Failed to convert kvpair to byte string")
+ return false
+ }
+ if err := json.Unmarshal(Value, &PONRMgr.PonResourceRanges); err != nil {
+ log.Error("Failed to Unmarshal json byte")
+ return false
+ }
+ log.Debug("Init resource ranges from kvstore success")
+ return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateRanges(StartIDx string, StartID uint32, EndIDx string, EndID uint32,
+ SharedIDx string, SharedPoolID uint32, RMgr *PONResourceManager) {
+ /*
+ Update the ranges for all resource types in the internal maps
+ param: resource type start index
+ param: start ID
+ param: resource type end index
+ param: end ID
+ param: resource type shared index
+ param: shared pool id
+ param: global resource manager
+ */
+ log.Debugf("update ranges for %s, %d", StartIDx, StartID)
+
+ if StartID != 0 {
+ if (PONRMgr.PonResourceRanges[StartIDx] == nil) || (PONRMgr.PonResourceRanges[StartIDx].(uint32) < StartID) {
+ PONRMgr.PonResourceRanges[StartIDx] = StartID
+ }
+ }
+ if EndID != 0 {
+ if (PONRMgr.PonResourceRanges[EndIDx] == nil) || (PONRMgr.PonResourceRanges[EndIDx].(uint32) > EndID) {
+ PONRMgr.PonResourceRanges[EndIDx] = EndID
+ }
+ }
+ //if SharedPoolID != 0 {
+ PONRMgr.PonResourceRanges[SharedIDx] = SharedPoolID
+ //}
+ if RMgr != nil {
+ PONRMgr.SharedResourceMgrs[SharedIDx] = RMgr
+ }
+}
+
+func (PONRMgr *PONResourceManager) InitDefaultPONResourceRanges(ONUIDStart uint32,
+ ONUIDEnd uint32,
+ ONUIDSharedPoolID uint32,
+ AllocIDStart uint32,
+ AllocIDEnd uint32,
+ AllocIDSharedPoolID uint32,
+ GEMPortIDStart uint32,
+ GEMPortIDEnd uint32,
+ GEMPortIDSharedPoolID uint32,
+ FlowIDStart uint32,
+ FlowIDEnd uint32,
+ FlowIDSharedPoolID uint32,
+ UNIIDStart uint32,
+ UNIIDEnd uint32,
+ NoOfPONPorts uint32,
+ IntfIDs []uint32) bool {
+
+ /*Initialize default PON resource ranges
+
+ :param onu_id_start_idx: onu id start index
+ :param onu_id_end_idx: onu id end index
+ :param onu_id_shared_pool_id: pool idx for id shared by all intfs or None for no sharing
+ :param alloc_id_start_idx: alloc id start index
+ :param alloc_id_end_idx: alloc id end index
+ :param alloc_id_shared_pool_id: pool idx for alloc id shared by all intfs or None for no sharing
+ :param gemport_id_start_idx: gemport id start index
+ :param gemport_id_end_idx: gemport id end index
+ :param gemport_id_shared_pool_id: pool idx for gemport id shared by all intfs or None for no sharing
+ :param flow_id_start_idx: flow id start index
+ :param flow_id_end_idx: flow id end index
+ :param flow_id_shared_pool_id: pool idx for flow id shared by all intfs or None for no sharing
+ :param num_of_pon_ports: number of PON ports
+ :param intf_ids: interfaces serviced by this manager
+ */
+ PONRMgr.UpdateRanges(ONU_ID_START_IDX, ONUIDStart, ONU_ID_END_IDX, ONUIDEnd, ONU_ID_SHARED_IDX, ONUIDSharedPoolID, nil)
+ PONRMgr.UpdateRanges(ALLOC_ID_START_IDX, AllocIDStart, ALLOC_ID_END_IDX, AllocIDEnd, ALLOC_ID_SHARED_IDX, AllocIDSharedPoolID, nil)
+ PONRMgr.UpdateRanges(GEMPORT_ID_START_IDX, GEMPortIDStart, GEMPORT_ID_END_IDX, GEMPortIDEnd, GEMPORT_ID_SHARED_IDX, GEMPortIDSharedPoolID, nil)
+ PONRMgr.UpdateRanges(FLOW_ID_START_IDX, FlowIDStart, FLOW_ID_END_IDX, FlowIDEnd, FLOW_ID_SHARED_IDX, FlowIDSharedPoolID, nil)
+ PONRMgr.UpdateRanges(UNI_ID_START_IDX, UNIIDStart, UNI_ID_END_IDX, UNIIDEnd, "", 0, nil)
+ log.Debug("Initialize default range values")
+ var i uint32
+ if IntfIDs == nil {
+ for i = 0; i < NoOfPONPorts; i++ {
+ PONRMgr.IntfIDs = append(PONRMgr.IntfIDs, i)
+ }
+ } else {
+ PONRMgr.IntfIDs = IntfIDs
+ }
+ return true
+}
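+
+// For example (all values illustrative only), a manager covering onu ids 1-127, alloc ids
+// 1024-2816, gemport ids 1024-8960, flow ids 1-64 and uni ids 0-3 across 16 PON ports,
+// with no sharing (shared pool id 0), would be seeded as:
+//
+// PONRMgr.InitDefaultPONResourceRanges(1, 127, 0, 1024, 2816, 0, 1024, 8960, 0, 1, 64, 0, 0, 3, 16, nil)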
+
+func (PONRMgr *PONResourceManager) InitDeviceResourcePool() error {
+
+ //Initialize resource pool for all PON ports.
+
+ log.Debug("Init resource ranges")
+
+ var err error
+ for _, Intf := range PONRMgr.IntfIDs {
+ SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
+ if SharedPoolID != 0 {
+ Intf = SharedPoolID
+ }
+ if err = PONRMgr.InitResourceIDPool(Intf, ONU_ID,
+ PONRMgr.PonResourceRanges[ONU_ID_START_IDX].(uint32),
+ PONRMgr.PonResourceRanges[ONU_ID_END_IDX].(uint32)); err != nil {
+ log.Error("Failed to init ONU ID resource pool")
+ return err
+ }
+ if SharedPoolID != 0 {
+ break
+ }
+ }
+
+ for _, Intf := range PONRMgr.IntfIDs {
+ SharedPoolID := PONRMgr.PonResourceRanges[ALLOC_ID_SHARED_IDX].(uint32)
+ if SharedPoolID != 0 {
+ Intf = SharedPoolID
+ }
+ if err = PONRMgr.InitResourceIDPool(Intf, ALLOC_ID,
+ PONRMgr.PonResourceRanges[ALLOC_ID_START_IDX].(uint32),
+ PONRMgr.PonResourceRanges[ALLOC_ID_END_IDX].(uint32)); err != nil {
+ log.Error("Failed to init ALLOC ID resource pool ")
+ return err
+ }
+ if SharedPoolID != 0 {
+ break
+ }
+ }
+ for _, Intf := range PONRMgr.IntfIDs {
+ SharedPoolID := PONRMgr.PonResourceRanges[GEMPORT_ID_SHARED_IDX].(uint32)
+ if SharedPoolID != 0 {
+ Intf = SharedPoolID
+ }
+ if err = PONRMgr.InitResourceIDPool(Intf, GEMPORT_ID,
+ PONRMgr.PonResourceRanges[GEMPORT_ID_START_IDX].(uint32),
+ PONRMgr.PonResourceRanges[GEMPORT_ID_END_IDX].(uint32)); err != nil {
+ log.Error("Failed to init GEMPORT ID resource pool")
+ return err
+ }
+ if SharedPoolID != 0 {
+ break
+ }
+ }
+
+ for _, Intf := range PONRMgr.IntfIDs {
+ SharedPoolID := PONRMgr.PonResourceRanges[FLOW_ID_SHARED_IDX].(uint32)
+ if SharedPoolID != 0 {
+ Intf = SharedPoolID
+ }
+ if err = PONRMgr.InitResourceIDPool(Intf, FLOW_ID,
+ PONRMgr.PonResourceRanges[FLOW_ID_START_IDX].(uint32),
+ PONRMgr.PonResourceRanges[FLOW_ID_END_IDX].(uint32)); err != nil {
+ log.Error("Failed to init FLOW ID resource pool")
+ return err
+ }
+ if SharedPoolID != 0 {
+ break
+ }
+ }
+ return err
+}
+
+func (PONRMgr *PONResourceManager) ClearDeviceResourcePool() error {
+
+ //Clear resource pool for all PON ports.
+
+ log.Debug("Clear resource ranges")
+
+ for _, Intf := range PONRMgr.IntfIDs {
+ SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
+ if SharedPoolID != 0 {
+ Intf = SharedPoolID
+ }
+ if status := PONRMgr.ClearResourceIDPool(Intf, ONU_ID); !status {
+ log.Error("Failed to clear ONU ID resource pool")
+ return errors.New("Failed to clear ONU ID resource pool")
+ }
+ if SharedPoolID != 0 {
+ break
+ }
+ }
+
+ for _, Intf := range PONRMgr.IntfIDs {
+ SharedPoolID := PONRMgr.PonResourceRanges[ALLOC_ID_SHARED_IDX].(uint32)
+ if SharedPoolID != 0 {
+ Intf = SharedPoolID
+ }
+ if status := PONRMgr.ClearResourceIDPool(Intf, ALLOC_ID); !status {
+ log.Error("Failed to clear ALLOC ID resource pool ")
+ return errors.New("Failed to clear ALLOC ID resource pool")
+ }
+ if SharedPoolID != 0 {
+ break
+ }
+ }
+ for _, Intf := range PONRMgr.IntfIDs {
+ SharedPoolID := PONRMgr.PonResourceRanges[GEMPORT_ID_SHARED_IDX].(uint32)
+ if SharedPoolID != 0 {
+ Intf = SharedPoolID
+ }
+ if status := PONRMgr.ClearResourceIDPool(Intf, GEMPORT_ID); !status {
+ log.Error("Failed to clear GEMPORT ID resource pool")
+ return errors.New("Failed to clear GEMPORT ID resource pool")
+ }
+ if SharedPoolID != 0 {
+ break
+ }
+ }
+
+ for _, Intf := range PONRMgr.IntfIDs {
+ SharedPoolID := PONRMgr.PonResourceRanges[FLOW_ID_SHARED_IDX].(uint32)
+ if SharedPoolID != 0 {
+ Intf = SharedPoolID
+ }
+ if status := PONRMgr.ClearResourceIDPool(Intf, FLOW_ID); !status {
+ log.Error("Failed to clear FLOW ID resource pool")
+ return errors.New("Failed to clear FLOW ID resource pool")
+ }
+ if SharedPoolID != 0 {
+ break
+ }
+ }
+ return nil
+}
+
+func (PONRMgr *PONResourceManager) InitResourceIDPool(Intf uint32, ResourceType string, StartID uint32, EndID uint32) error {
+
+ /*Initialize Resource ID pool for a given Resource Type on a given PON Port
+
+ :param pon_intf_id: OLT PON interface id
+ :param resource_type: String to identify type of resource
+ :param start_idx: start index for onu id pool
+ :param end_idx: end index for onu id pool
+ :return boolean: True if resource id pool initialized else false
+ */
+
+ // delegate to the master instance if sharing enabled across instances
+ SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+ if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+ return SharedResourceMgr.InitResourceIDPool(Intf, ResourceType, StartID, EndID)
+ }
+
+ Path := PONRMgr.GetPath(Intf, ResourceType)
+ if Path == "" {
+ log.Errorf("Failed to get path for resource type %s", ResourceType)
+ return errors.New(fmt.Sprintf("Failed to get path for resource type %s", ResourceType))
+ }
+
+ //In case of adapter reboot and reconciliation resource in kv store
+ //checked for its presence if not kv store update happens
+ Res, err := PONRMgr.GetResource(Path)
+ if (err == nil) && (Res != nil) {
+ log.Debugf("Resource %s already present in store ", Path)
+ return nil
+ } else {
+ FormatResult, err := PONRMgr.FormatResource(Intf, StartID, EndID)
+ if err != nil {
+ log.Errorf("Failed to format resource")
+ return err
+ }
+ // Add resource as json in kv store.
+ err = PONRMgr.KVStore.Put(Path, FormatResult)
+ if err == nil {
+ log.Debug("Successfuly posted to kv store")
+ return err
+ }
+ }
+
+ log.Debug("Error initializing pool")
+
+ return err
+}
+
+func (PONRMgr *PONResourceManager) FormatResource(IntfID uint32, StartIDx uint32, EndIDx uint32) ([]byte, error) {
+ /*
+ Format resource as json.
+ :param pon_intf_id: OLT PON interface id
+ :param start_idx: start index for id pool
+ :param end_idx: end index for id pool
+ :return dictionary: resource formatted as map
+ */
+ // Format resource as json to be stored in backend store
+ Resource := make(map[string]interface{})
+ Resource[PON_INTF_ID] = IntfID
+ Resource[START_IDX] = StartIDx
+ Resource[END_IDX] = EndIDx
+ /*
+ Resource pool stored in backend store as binary string.
+ Tracking the resource allocation will be done by setting the bits
+ in the byte array. The index set will be the resource number allocated.
+ */
+ var TSData *bitmap.Threadsafe
+ if TSData = bitmap.NewTS(int(EndIDx)); TSData == nil {
+ log.Error("Failed to create a bitmap")
+ return nil, errors.New("Failed to create bitmap")
+ }
+ Resource[POOL] = TSData.Data(false) // Pass false so the bitmap library returns its underlying data without making a copy
+
+ Value, err := json.Marshal(Resource)
+ if err != nil {
+ log.Errorf("Failed to marshall resource")
+ return nil, err
+ }
+ return Value, err
+}
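+
+// The value stored by FormatResource is a JSON object of the form (illustrative):
+//
+// {"pon_intf_id":0,"start_idx":1,"end_idx":128,"pool":"<base64 bitmap>"}
+//
+// json.Marshal encodes the []byte pool as a base64 string, which is why GetResource below
+// base64-decodes the pool field before handing the bitmap back.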
+func (PONRMgr *PONResourceManager) GetResource(Path string) (map[string]interface{}, error) {
+ /*
+ Get resource from kv store.
+
+ :param path: path to get resource
+ :return: resource if resource present in kv store else None
+ */
+ //get resource from kv store
+
+ var Value []byte
+ Result := make(map[string]interface{})
+ var Str string
+
+ Resource, err := PONRMgr.KVStore.Get(Path)
+ if (err != nil) || (Resource == nil) {
+ log.Debugf("Resource unavailable at %s", Path)
+ return nil, err
+ }
+
+ Value, err = ToByte(Resource.Value)
+ if err != nil {
+ log.Error("Failed to convert resource value to byte")
+ return Result, err
+ }
+
+ // decode resource fetched from backend store to dictionary
+ err = json.Unmarshal(Value, &Result)
+ if err != nil {
+ log.Error("Failed to decode resource")
+ return Result, err
+ }
+ /*
+ resource pool in backend store stored as binary string whereas to
+ access the pool to generate/release IDs it need to be converted
+ as BitArray
+ */
+ Str, err = ToString(Result[POOL])
+ if err != nil {
+ log.Error("Failed to conver to kv pair to string")
+ return Result, err
+ }
+ Decode64, _ := base64.StdEncoding.DecodeString(Str)
+ Result[POOL], err = ToByte(Decode64)
+ if err != nil {
+ log.Error("Failed to convert resource pool to byte")
+ return Result, err
+ }
+
+ return Result, err
+}
+
+func (PONRMgr *PONResourceManager) GetPath(IntfID uint32, ResourceType string) string {
+ /*
+ Get path for given resource type.
+ :param pon_intf_id: OLT PON interface id
+ :param resource_type: String to identify type of resource
+ :return: path for given resource type
+ */
+
+ /*
+ Get the shared pool for the given resource type.
+ all the resource ranges and the shared resource maps are initialized during the init.
+ */
+ SharedPoolID := PONRMgr.PonResourceRanges[PONRMgr.SharedIdxByType[ResourceType]].(uint32)
+ if SharedPoolID != 0 {
+ IntfID = SharedPoolID
+ }
+ var Path string
+ if ResourceType == ONU_ID {
+ Path = fmt.Sprintf(ONU_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+ } else if ResourceType == ALLOC_ID {
+ Path = fmt.Sprintf(ALLOC_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+ } else if ResourceType == GEMPORT_ID {
+ Path = fmt.Sprintf(GEMPORT_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+ } else if ResourceType == FLOW_ID {
+ Path = fmt.Sprintf(FLOW_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+ } else {
+ log.Error("Invalid resource pool identifier")
+ }
+ return Path
+}
+
+func (PONRMgr *PONResourceManager) GetResourceID(IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error) {
+ /*
+ Create alloc/gemport/onu/flow id for given OLT PON interface.
+ :param pon_intf_id: OLT PON interface id
+ :param resource_type: String to identify type of resource
+ :param num_of_id: required number of ids
+ :return list/uint32/None: list, uint32 or None if resource type is
+ alloc_id/gemport_id, onu_id or invalid type respectively
+ */
+ if NumIDs < 1 {
+ log.Error("Invalid number of resources requested")
+ return nil, errors.New(fmt.Sprintf("Invalid number of resources requested %d", NumIDs))
+ }
+ // delegate to the master instance if sharing enabled across instances
+
+ SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+ if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+ return SharedResourceMgr.GetResourceID(IntfID, ResourceType, NumIDs)
+ }
+ log.Debugf("Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
+
+ Path := PONRMgr.GetPath(IntfID, ResourceType)
+ if Path == "" {
+ log.Errorf("Failed to get path for resource type %s", ResourceType)
+ return nil, errors.New(fmt.Sprintf("Failed to get path for resource type %s", ResourceType))
+ }
+ log.Debugf("Get resource for type %s on path %s", ResourceType, Path)
+ var Result []uint32
+ var NextID uint32
+ Resource, err := PONRMgr.GetResource(Path)
+ if (err == nil) && (ResourceType == ONU_ID) || (ResourceType == FLOW_ID) {
+ if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+ log.Error("Failed to Generate ID")
+ return Result, err
+ }
+ Result = append(Result, NextID)
+ } else if (err == nil) && ((ResourceType == GEMPORT_ID) || (ResourceType == ALLOC_ID)) {
+ if NumIDs == 1 {
+ if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+ log.Error("Failed to Generate ID")
+ return Result, err
+ }
+ Result = append(Result, NextID)
+ } else {
+ for NumIDs > 0 {
+ if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+ log.Error("Failed to Generate ID")
+ return Result, err
+ }
+ Result = append(Result, NextID)
+ NumIDs--
+ }
+ }
+ } else {
+ log.Error("get resource failed")
+ return Result, err
+ }
+
+ //Update resource in kv store
+ if PONRMgr.UpdateResource(Path, Resource) != nil {
+ log.Errorf("Failed to update resource %s", Path)
+ return nil, errors.New(fmt.Sprintf("Failed to update resource %s", Path))
+ }
+ return Result, nil
+}
+
+func checkValidResourceType(ResourceType string) bool {
+ KnownResourceTypes := []string{ONU_ID, ALLOC_ID, GEMPORT_ID, FLOW_ID}
+
+ for _, v := range KnownResourceTypes {
+ if v == ResourceType {
+ return true
+ }
+ }
+ return false
+}
+
+func (PONRMgr *PONResourceManager) FreeResourceID(IntfID uint32, ResourceType string, ReleaseContent []uint32) bool {
+ /*
+ Release alloc/gemport/onu/flow id for given OLT PON interface.
+ :param pon_intf_id: OLT PON interface id
+ :param resource_type: String to identify type of resource
+ :param release_content: required number of ids
+ :return boolean: True if all IDs in given release_content release else False
+ */
+ if !checkValidResourceType(ResourceType) {
+ log.Error("Invalid resource type")
+ return false
+ }
+ if ReleaseContent == nil {
+ log.Debug("Nothing to release")
+ return true
+ }
+ // delegate to the master instance if sharing enabled across instances
+ SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+ if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+ return SharedResourceMgr.FreeResourceID(IntfID, ResourceType, ReleaseContent)
+ }
+ Path := PONRMgr.GetPath(IntfID, ResourceType)
+ if Path == "" {
+ log.Error("Failed to get path")
+ return false
+ }
+ Resource, err := PONRMgr.GetResource(Path)
+ if err != nil {
+ log.Error("Failed to get resource")
+ return false
+ }
+ for _, Val := range ReleaseContent {
+ PONRMgr.ReleaseID(Resource, Val)
+ }
+ if PONRMgr.UpdateResource(Path, Resource) != nil {
+ log.Errorf("Free resource for %s failed", Path)
+ return false
+ }
+ return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateResource(Path string, Resource map[string]interface{}) error {
+ /*
+ Update resource in resource kv store.
+ :param path: path to update resource
+ :param resource: resource need to be updated
+ :return boolean: True if resource updated in kv store else False
+ */
+ // TODO resource[POOL] = resource[POOL].bin
+ Value, err := json.Marshal(Resource)
+ if err != nil {
+ log.Error("failed to Marshal")
+ return err
+ }
+ err = PONRMgr.KVStore.Put(Path, Value)
+ if err != nil {
+ log.Error("failed to put data to kv store %s", Path)
+ return err
+ }
+ return nil
+}
+
+func (PONRMgr *PONResourceManager) ClearResourceIDPool(IntfID uint32, ResourceType string) bool {
+ /*
+ Clear Resource Pool for a given Resource Type on a given PON Port.
+ :return boolean: True if removed else False
+ */
+
+ // delegate to the master instance if sharing enabled across instances
+ SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+ if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+ return SharedResourceMgr.ClearResourceIDPool(IntfID, ResourceType)
+ }
+ Path := PONRMgr.GetPath(IntfID, ResourceType)
+ if Path == "" {
+ log.Error("Failed to get path")
+ return false
+ }
+
+ if err := PONRMgr.KVStore.Delete(Path); err != nil {
+ log.Errorf("Failed to delete resource %s", Path)
+ return false
+ }
+ log.Debugf("Cleared resource %s", Path)
+ return true
+}
+
+func (PONRMgr *PONResourceManager) InitResourceMap(PONIntfONUID string) {
+ /*
+ Initialize resource map
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ */
+ // initialize pon_intf_onu_id tuple to alloc_ids map
+ AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+ var AllocIDs []byte
+ Result := PONRMgr.KVStore.Put(AllocIDPath, AllocIDs)
+ if Result != nil {
+ log.Error("Failed to update the KV store")
+ return
+ }
+ // initialize pon_intf_onu_id tuple to gemport_ids map
+ GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+ var GEMPortIDs []byte
+ Result = PONRMgr.KVStore.Put(GEMPortIDPath, GEMPortIDs)
+ if Result != nil {
+ log.Error("Failed to update the KV store")
+ return
+ }
+}
+
+func (PONRMgr *PONResourceManager) RemoveResourceMap(PONIntfONUID string) bool {
+ /*
+ Remove resource map
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ */
+ // remove pon_intf_onu_id tuple to alloc_ids map
+ var err error
+ AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+ if err = PONRMgr.KVStore.Delete(AllocIDPath); err != nil {
+ log.Errorf("Failed to remove resource %s", AllocIDPath)
+ return false
+ }
+ // remove pon_intf_onu_id tuple to gemport_ids map
+ GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+ err = PONRMgr.KVStore.Delete(GEMPortIDPath)
+ if err != nil {
+ log.Errorf("Failed to remove resource %s", GEMPortIDPath)
+ return false
+ }
+
+ FlowIDPath := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+ if FlowIDs, err := PONRMgr.KVStore.List(FlowIDPath); err == nil {
+ for _, Flow := range FlowIDs {
+ FlowIDInfoPath := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, PONIntfONUID, Flow.Value)
+ if err = PONRMgr.KVStore.Delete(FlowIDInfoPath); err != nil {
+ log.Errorf("Failed to remove resource %s", FlowIDInfoPath)
+ return false
+ }
+ }
+ }
+
+ if err = PONRMgr.KVStore.Delete(FlowIDPath); err != nil {
+ log.Errorf("Failed to remove resource %s", FlowIDPath)
+ return false
+ }
+
+ return true
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentAllocIDForOnu(IntfONUID string) []uint32 {
+ /*
+ Get currently configured alloc ids for given pon_intf_onu_id
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :return list: List of alloc_ids if available, else nil
+ */
+ Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+
+ var Data []uint32
+ Value, err := PONRMgr.KVStore.Get(Path)
+ if err == nil {
+ if Value != nil {
+ Val, err := ToByte(Value.Value)
+ if err != nil {
+ log.Errorw("Failed to convert into byte array", log.Fields{"error": err})
+ return Data
+ }
+ if err = json.Unmarshal(Val, &Data); err != nil {
+ log.Error("Failed to unmarshal", log.Fields{"error": err})
+ return Data
+ }
+ }
+ }
+ return Data
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentGEMPortIDsForOnu(IntfONUID string) []uint32 {
+ /*
+ Get currently configured gemport ids for given pon_intf_onu_id
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :return list: List of gemport IDs if available, else nil
+ */
+
+ Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+ log.Debugf("Getting current gemports for %s", Path)
+ var Data []uint32
+ Value, err := PONRMgr.KVStore.Get(Path)
+ if err == nil {
+ if Value != nil {
+ Val, _ := ToByte(Value.Value)
+ if err = json.Unmarshal(Val, &Data); err != nil {
+ log.Errorw("Failed to unmarshal", log.Fields{"error": err})
+ return Data
+ }
+ }
+ } else {
+ log.Errorf("Failed to get data from kvstore for %s", Path)
+ }
+ return Data
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentFlowIDsForOnu(IntfONUID string) []uint32 {
+ /*
+ Get currently configured flow ids for given pon_intf_onu_id
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :return list: List of Flow IDs if available, else nil
+ */
+
+ Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+
+ var Data []uint32
+ Value, err := PONRMgr.KVStore.Get(Path)
+ if err == nil {
+ if Value != nil {
+ Val, _ := ToByte(Value.Value)
+ if err = json.Unmarshal(Val, &Data); err != nil {
+ log.Error("Failed to unmarshal")
+ return Data
+ }
+ }
+ }
+ return Data
+}
+
+func (PONRMgr *PONResourceManager) GetFlowIDInfo(IntfONUID string, FlowID uint32, Data interface{}) error {
+ /*
+ Get flow details configured for the ONU.
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :param flow_id: Flow Id reference
+ :param Data: Result
+ :return error: nil if no error in getting from KV store
+ */
+
+ Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+
+ Value, err := PONRMgr.KVStore.Get(Path)
+ if err == nil {
+ if Value != nil {
+ Val, err := ToByte(Value.Value)
+ if err != nil {
+ log.Errorw("Failed to convert flowinfo into byte array", log.Fields{"error": err})
+ return err
+ }
+ if err = json.Unmarshal(Val, Data); err != nil {
+ log.Errorw("Failed to unmarshal", log.Fields{"error": err})
+ return err
+ }
+ }
+ }
+ return err
+}
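+
+// Illustrative sketch: FlowInfo is a hypothetical caller-defined struct, and
+// the "0.1" intf/onu reference is an assumed format; pass a pointer so the
+// stored JSON blob can be unmarshalled into it.
+//
+//  var info FlowInfo
+//  if err := rm.GetFlowIDInfo("0.1", 1, &info); err != nil {
+//      log.Errorw("Failed to fetch flow info", log.Fields{"error": err})
+//  }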
+
+func (PONRMgr *PONResourceManager) RemoveFlowIDInfo(IntfONUID string, FlowID uint32) bool {
+ /*
+ Remove flow_id details configured for the ONU.
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :param flow_id: Flow Id reference
+ */
+ Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+
+ if err := PONRMgr.KVStore.Delete(Path); err != nil {
+ log.Errorf("Falied to remove resource %s", Path)
+ return false
+ }
+ return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateAllocIdsForOnu(IntfONUID string, AllocIDs []uint32) error {
+ /*
+ Update currently configured alloc ids for given pon_intf_onu_id
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :param alloc_ids: list of alloc ids
+ */
+ var Value []byte
+ var err error
+ Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+ Value, err = json.Marshal(AllocIDs)
+ if err != nil {
+ log.Error("failed to Marshal")
+ return err
+ }
+
+ if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+ log.Errorf("Failed to update resource %s", Path)
+ return err
+ }
+ return err
+}
+
+func (PONRMgr *PONResourceManager) UpdateGEMPortIDsForOnu(IntfONUID string, GEMPortIDs []uint32) error {
+ /*
+ Update currently configured gemport ids for given pon_intf_onu_id
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :param gemport_ids: list of gem port ids
+ */
+
+ var Value []byte
+ var err error
+ Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+ log.Debugf("Updating gemport ids for %s", Path)
+ Value, err = json.Marshal(GEMPortIDs)
+ if err != nil {
+ log.Error("failed to Marshal")
+ return err
+ }
+
+ if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+ log.Errorf("Failed to update resource %s", Path)
+ return err
+ }
+ return err
+}
+
+func checkForFlowIDInList(FlowIDList []uint32, FlowID uint32) (bool, uint32) {
+ /*
+ Check for a flow id in a given list of flow IDs.
+ :param FlowIDList: List of Flow IDs
+ :param FlowID: Flow ID to check for in the list
+ :return: true and the index if present, false otherwise.
+ */
+
+ for idx := range FlowIDList {
+ if FlowID == FlowIDList[idx] {
+ return true, uint32(idx)
+ }
+ }
+ return false, 0
+}
+
+func (PONRMgr *PONResourceManager) UpdateFlowIDForOnu(IntfONUID string, FlowID uint32, Add bool) error {
+ /*
+ Update the flow_id list of the ONU (add or remove flow_id from the list)
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :param flow_id: flow ID
+ :param add: Boolean flag to indicate whether the flow_id should be
+ added or removed from the list. Defaults to adding the flow.
+ */
+ var Value []byte
+ var err error
+ var RetVal bool
+ var IDx uint32
+ Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+ FlowIDs := PONRMgr.GetCurrentFlowIDsForOnu(IntfONUID)
+
+ if Add {
+ if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); RetVal {
+ return err
+ }
+ FlowIDs = append(FlowIDs, FlowID)
+ } else {
+ if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); !RetVal {
+ return err
+ }
+ // delete the index and shift
+ FlowIDs = append(FlowIDs[:IDx], FlowIDs[IDx+1:]...)
+ }
+ Value, err = json.Marshal(FlowIDs)
+ if err != nil {
+ log.Error("Failed to Marshal")
+ return err
+ }
+
+ if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+ log.Errorf("Failed to update resource %s", Path)
+ return err
+ }
+ return err
+}
+
+func (PONRMgr *PONResourceManager) UpdateFlowIDInfoForOnu(IntfONUID string, FlowID uint32, FlowData interface{}) error {
+ /*
+ Update any metadata associated with the flow_id. The flow_data could be JSON
+ or any other data structure; the resource manager doesn't care.
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :param flow_id: Flow ID
+ :param flow_data: Flow data blob
+ */
+ var Value []byte
+ var err error
+ Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+ Value, err = json.Marshal(FlowData)
+ if err != nil {
+ log.Error("failed to Marshal")
+ return err
+ }
+
+ if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+ log.Errorf("Failed to update resource %s", Path)
+ return err
+ }
+ return err
+}
+
+func (PONRMgr *PONResourceManager) GenerateNextID(Resource map[string]interface{}) (uint32, error) {
+ /*
+ Generate unique id having OFFSET as start
+ :param resource: resource used to generate ID
+ :return uint32: generated id
+ */
+ ByteArray, err := ToByte(Resource[POOL])
+ if err != nil {
+ log.Error("Failed to convert resource to byte array")
+ return 0, err
+ }
+ Data := bitmap.TSFromData(ByteArray, false)
+ if Data == nil {
+ log.Error("Failed to get data from byte array")
+ return 0, errors.New("Failed to get data from byte array")
+ }
+
+ Len := Data.Len()
+ var Idx int
+ for Idx = 0; Idx < Len; Idx++ {
+ Val := Data.Get(Idx)
+ if !Val {
+ break
+ }
+ }
+ Data.Set(Idx, true)
+ res := uint32(Resource[START_IDX].(float64))
+ Resource[POOL] = Data.Data(false)
+ log.Debugf("Generated ID for %d", (uint32(Idx) + res))
+ return (uint32(Idx) + res), err
+}
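+
+// Worked example of the arithmetic above: with START_IDX = 1024 and the first
+// two bits of POOL already set, the scan stops at Idx = 2, marks that bit as
+// used, and the call returns 1024 + 2 = 1026.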
+
+func (PONRMgr *PONResourceManager) ReleaseID(Resource map[string]interface{}, Id uint32) bool {
+ /*
+ Release unique id having OFFSET as start index.
+ :param resource: resource used to release ID
+ :param unique_id: id need to be released
+ */
+ ByteArray, err := ToByte(Resource[POOL])
+ if err != nil {
+ log.Error("Failed to convert resource to byte array")
+ return false
+ }
+ Data := bitmap.TSFromData(ByteArray, false)
+ if Data == nil {
+ log.Error("Failed to get resource pool")
+ return false
+ }
+ Idx := Id - uint32(Resource[START_IDX].(float64))
+ Data.Set(int(Idx), false)
+ Resource[POOL] = Data.Data(false)
+
+ return true
+}
+
+func (PONRMgr *PONResourceManager) GetTechnology() string {
+ return PONRMgr.Technology
+}
+
+func (PONRMgr *PONResourceManager) GetResourceTypeAllocID() string {
+ return ALLOC_ID
+}
+
+func (PONRMgr *PONResourceManager) GetResourceTypeGemPortID() string {
+ return GEMPORT_ID
+}
+
+// ToByte converts an interface value to a []byte. The interface should either be of
+// a string type or []byte. Otherwise, an error is returned.
+func ToByte(value interface{}) ([]byte, error) {
+ switch t := value.(type) {
+ case []byte:
+ return value.([]byte), nil
+ case string:
+ return []byte(value.(string)), nil
+ default:
+ return nil, fmt.Errorf("unexpected-type-%T", t)
+ }
+}
+
+// ToString converts an interface value to a string. The interface should either be of
+// a string type or []byte. Otherwise, an error is returned.
+func ToString(value interface{}) (string, error) {
+ switch t := value.(type) {
+ case []byte:
+ return string(value.([]byte)), nil
+ case string:
+ return value.(string), nil
+ default:
+ return "", fmt.Errorf("unexpected-type-%T", t)
+ }
+}
diff --git a/pkg/common/probe/probe.go b/pkg/common/probe/probe.go
new file mode 100644
index 0000000..910e0c6
--- /dev/null
+++ b/pkg/common/probe/probe.go
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package probe
+
+import (
+ "context"
+ "fmt"
+ "github.com/opencord/voltha-lib-go/pkg/common/log"
+ "net/http"
+ "sync"
+)
+
+// ProbeContextKeyType is the type of the context key used to fetch the Probe instance from a context
+type ProbeContextKeyType string
+
+// ServiceStatus typed values for service status
+type ServiceStatus int
+
+const (
+ // ServiceStatusUnknown initial state of services
+ ServiceStatusUnknown ServiceStatus = iota
+
+ // ServiceStatusPreparing to optionally be used for prep, such as connecting
+ ServiceStatusPreparing
+
+ // ServiceStatusPrepared to optionally be used when prep is complete, but before run
+ ServiceStatusPrepared
+
+ // ServiceStatusRunning service is functional
+ ServiceStatusRunning
+
+ // ServiceStatusStopped service has stopped, but not because of error
+ ServiceStatusStopped
+
+ // ServiceStatusFailed service has stopped because of an error
+ ServiceStatusFailed
+)
+
+const (
+ // ProbeContextKey value of context key to fetch probe
+ ProbeContextKey = ProbeContextKeyType("status-update-probe")
+)
+
+// String converts ServiceStatus values to strings
+func (s ServiceStatus) String() string {
+ switch s {
+ default:
+ fallthrough
+ case ServiceStatusUnknown:
+ return "Unknown"
+ case ServiceStatusPreparing:
+ return "Preparing"
+ case ServiceStatusPrepared:
+ return "Prepared"
+ case ServiceStatusRunning:
+ return "Running"
+ case ServiceStatusStopped:
+ return "Stopped"
+ case ServiceStatusFailed:
+ return "Failed"
+ }
+}
+
+// ServiceStatusUpdate status update event
+type ServiceStatusUpdate struct {
+ Name string
+ Status ServiceStatus
+}
+
+// Probe receiver on which to implement probe capabilities
+type Probe struct {
+ readyFunc func(map[string]ServiceStatus) bool
+ healthFunc func(map[string]ServiceStatus) bool
+
+ mutex sync.RWMutex
+ status map[string]ServiceStatus
+ isReady bool
+ isHealthy bool
+}
+
+// WithReadyFunc overrides the default ready calculation function
+func (p *Probe) WithReadyFunc(readyFunc func(map[string]ServiceStatus) bool) *Probe {
+ p.readyFunc = readyFunc
+ return p
+}
+
+// WithHealthFunc overrides the default health calculation function
+func (p *Probe) WithHealthFunc(healthFunc func(map[string]ServiceStatus) bool) *Probe {
+ p.healthFunc = healthFunc
+ return p
+}
+
+// RegisterService registers one or more service names with the probe; status will be tracked against each service name
+func (p *Probe) RegisterService(names ...string) {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+ if p.status == nil {
+ p.status = make(map[string]ServiceStatus)
+ }
+ for _, name := range names {
+ if _, ok := p.status[name]; !ok {
+ p.status[name] = ServiceStatusUnknown
+ log.Debugw("probe-service-registered", log.Fields{"service-name": name})
+ }
+ }
+
+ if p.readyFunc != nil {
+ p.isReady = p.readyFunc(p.status)
+ } else {
+ p.isReady = defaultReadyFunc(p.status)
+ }
+
+ if p.healthFunc != nil {
+ p.isHealthy = p.healthFunc(p.status)
+ } else {
+ p.isHealthy = defaultHealthFunc(p.status)
+ }
+}
+
+// UpdateStatus is a utility function to send a service update to the probe
+func (p *Probe) UpdateStatus(name string, status ServiceStatus) {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+ if p.status == nil {
+ p.status = make(map[string]ServiceStatus)
+ }
+ p.status[name] = status
+ if p.readyFunc != nil {
+ p.isReady = p.readyFunc(p.status)
+ } else {
+ p.isReady = defaultReadyFunc(p.status)
+ }
+
+ if p.healthFunc != nil {
+ p.isHealthy = p.healthFunc(p.status)
+ } else {
+ p.isHealthy = defaultHealthFunc(p.status)
+ }
+ log.Debugw("probe-service-status-updated",
+ log.Fields{
+ "service-name": name,
+ "status": status.String(),
+ "ready": p.isReady,
+ "health": p.isHealthy,
+ })
+}
+
+// UpdateStatusFromContext is a convenience function that pulls the Probe reference from the
+// Context, if it exists, and then calls UpdateStatus on that Probe reference. If the Context
+// is nil, or if a Probe reference is not associated with the ProbeContextKey, then nothing
+// happens
+func UpdateStatusFromContext(ctx context.Context, name string, status ServiceStatus) {
+ if ctx != nil {
+ if value := ctx.Value(ProbeContextKey); value != nil {
+ if p, ok := value.(*Probe); ok {
+ p.UpdateStatus(name, status)
+ }
+ }
+ }
+}
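+
+// Illustrative sketch: a component stores its Probe in the context it passes
+// down, so lower layers can report status without holding a Probe reference.
+//
+//  p := &Probe{}
+//  p.RegisterService("kv-store")
+//  ctx := context.WithValue(context.Background(), ProbeContextKey, p)
+//  UpdateStatusFromContext(ctx, "kv-store", ServiceStatusRunning)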
+
+// pulled out to a function to make unit testing easier
+func (p *Probe) readzFunc(w http.ResponseWriter, req *http.Request) {
+ p.mutex.RLock()
+ defer p.mutex.RUnlock()
+ if p.isReady {
+ w.WriteHeader(http.StatusOK)
+ } else {
+ w.WriteHeader(http.StatusTeapot)
+ }
+}
+func (p *Probe) healthzFunc(w http.ResponseWriter, req *http.Request) {
+ p.mutex.RLock()
+ defer p.mutex.RUnlock()
+ if p.isHealthy {
+ w.WriteHeader(http.StatusOK)
+ } else {
+ w.WriteHeader(http.StatusTeapot)
+ }
+}
+func (p *Probe) detailzFunc(w http.ResponseWriter, req *http.Request) {
+ p.mutex.RLock()
+ defer p.mutex.RUnlock()
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte("{"))
+ comma := ""
+ for c, s := range p.status {
+ w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String())))
+ comma = ", "
+ }
+ w.Write([]byte("}"))
+ w.WriteHeader(http.StatusOK)
+
+}
+
+// ListenAndServe implements 3 HTTP endpoints on the given address for healthz, readz, and detailz. Returns only on error
+func (p *Probe) ListenAndServe(address string) {
+ mux := http.NewServeMux()
+
+ // Returns the result of the readyFunc calculation
+ mux.HandleFunc("/readz", p.readzFunc)
+
+ // Returns the result of the healthFunc calculation
+ mux.HandleFunc("/healthz", p.healthzFunc)
+
+ // Returns the details of the services and their status as JSON
+ mux.HandleFunc("/detailz", p.detailzFunc)
+ s := &http.Server{
+ Addr: address,
+ Handler: mux,
+ }
+ log.Fatal(s.ListenAndServe())
+}
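+
+// Illustrative sketch: ListenAndServe blocks, so it is typically run on its
+// own goroutine; ":8080" is an arbitrary example address.
+//
+//  go p.ListenAndServe(":8080")
+//  // GET /readz, /healthz and /detailz are now served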
+
+// defaultReadyFunc if all services are running then ready, else not
+func defaultReadyFunc(services map[string]ServiceStatus) bool {
+ if len(services) == 0 {
+ return false
+ }
+ for _, status := range services {
+ if status != ServiceStatusRunning {
+ return false
+ }
+ }
+ return true
+}
+
+// defaultHealthFunc if no service is stopped or failed, then healthy, else not.
+// services start as unknown, so they are considered healthy
+func defaultHealthFunc(services map[string]ServiceStatus) bool {
+ if len(services) == 0 {
+ return false
+ }
+ for _, status := range services {
+ if status == ServiceStatusStopped || status == ServiceStatusFailed {
+ return false
+ }
+ }
+ return true
+}
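+
+// Illustrative sketch of overriding the defaults above: treat the probe as
+// ready as soon as any one registered service reaches Running.
+//
+//  p := (&Probe{}).WithReadyFunc(func(s map[string]ServiceStatus) bool {
+//      for _, status := range s {
+//          if status == ServiceStatusRunning {
+//              return true
+//          }
+//      }
+//      return false
+//  })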
diff --git a/pkg/common/probe/probe_test.go b/pkg/common/probe/probe_test.go
new file mode 100644
index 0000000..1322d47
--- /dev/null
+++ b/pkg/common/probe/probe_test.go
@@ -0,0 +1,394 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package probe
+
+import (
+ "context"
+ "encoding/json"
+ "github.com/opencord/voltha-lib-go/pkg/common/log"
+ "github.com/stretchr/testify/assert"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func init() {
+ log.AddPackage(log.JSON, log.WarnLevel, nil)
+}
+
+func TestServiceStatusString(t *testing.T) {
+ assert.Equal(t, "Unknown", ServiceStatusUnknown.String(), "ServiceStatusUnknown")
+ assert.Equal(t, "Preparing", ServiceStatusPreparing.String(), "ServiceStatusPreparing")
+ assert.Equal(t, "Prepared", ServiceStatusPrepared.String(), "ServiceStatusPrepared")
+ assert.Equal(t, "Running", ServiceStatusRunning.String(), "ServiceStatusRunning")
+ assert.Equal(t, "Stopped", ServiceStatusStopped.String(), "ServiceStatusStopped")
+ assert.Equal(t, "Failed", ServiceStatusFailed.String(), "ServiceStatusFailed")
+}
+
+func AlwaysTrue(map[string]ServiceStatus) bool {
+ return true
+}
+
+func AlwaysFalse(map[string]ServiceStatus) bool {
+ return false
+}
+
+func TestWithFuncs(t *testing.T) {
+ p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysFalse)
+
+ assert.NotNil(t, p.readyFunc, "ready func not set")
+ assert.True(t, p.readyFunc(nil), "ready func not set correctly")
+ assert.NotNil(t, p.healthFunc, "health func not set")
+ assert.False(t, p.healthFunc(nil), "health func not set correctly")
+}
+
+func TestWithReadyFuncOnly(t *testing.T) {
+ p := (&Probe{}).WithReadyFunc(AlwaysTrue)
+
+ assert.NotNil(t, p.readyFunc, "ready func not set")
+ assert.True(t, p.readyFunc(nil), "ready func not set correctly")
+ assert.Nil(t, p.healthFunc, "health func set")
+}
+
+func TestWithHealthFuncOnly(t *testing.T) {
+ p := (&Probe{}).WithHealthFunc(AlwaysTrue)
+
+ assert.Nil(t, p.readyFunc, "ready func set")
+ assert.NotNil(t, p.healthFunc, "health func not set")
+ assert.True(t, p.healthFunc(nil), "health func not set correctly")
+}
+
+func TestRegisterOneService(t *testing.T) {
+ p := &Probe{}
+
+ p.RegisterService("one")
+
+ assert.Equal(t, 1, len(p.status), "wrong number of services")
+
+ _, ok := p.status["one"]
+ assert.True(t, ok, "service not found")
+}
+
+func TestRegisterMultipleServices(t *testing.T) {
+ p := &Probe{}
+
+ p.RegisterService("one", "two", "three", "four")
+
+ assert.Equal(t, 4, len(p.status), "wrong number of services")
+
+ _, ok := p.status["one"]
+ assert.True(t, ok, "service one not found")
+ _, ok = p.status["two"]
+ assert.True(t, ok, "service two not found")
+ _, ok = p.status["three"]
+ assert.True(t, ok, "service three not found")
+ _, ok = p.status["four"]
+ assert.True(t, ok, "service four not found")
+}
+
+func TestRegisterMultipleServicesIncremental(t *testing.T) {
+ p := &Probe{}
+
+ p.RegisterService("one")
+ p.RegisterService("two")
+ p.RegisterService("three", "four")
+
+ assert.Equal(t, 4, len(p.status), "wrong number of services")
+
+ _, ok := p.status["one"]
+ assert.True(t, ok, "service one not found")
+ _, ok = p.status["two"]
+ assert.True(t, ok, "service two not found")
+ _, ok = p.status["three"]
+ assert.True(t, ok, "service three not found")
+ _, ok = p.status["four"]
+ assert.True(t, ok, "service four not found")
+}
+
+func TestRegisterMultipleServicesDuplicates(t *testing.T) {
+ p := &Probe{}
+
+ p.RegisterService("one", "one", "one", "two")
+
+ assert.Equal(t, 2, len(p.status), "wrong number of services")
+
+ _, ok := p.status["one"]
+ assert.True(t, ok, "service one not found")
+ _, ok = p.status["two"]
+ assert.True(t, ok, "service two not found")
+}
+
+func TestRegisterMultipleServicesDuplicatesIncremental(t *testing.T) {
+ p := &Probe{}
+
+ p.RegisterService("one")
+ p.RegisterService("one")
+ p.RegisterService("one", "two")
+
+ assert.Equal(t, 2, len(p.status), "wrong number of services")
+
+ _, ok := p.status["one"]
+ assert.True(t, ok, "service one not found")
+ _, ok = p.status["two"]
+ assert.True(t, ok, "service two not found")
+}
+
+func TestUpdateStatus(t *testing.T) {
+ p := &Probe{}
+
+ p.RegisterService("one", "two")
+ p.UpdateStatus("one", ServiceStatusRunning)
+
+ assert.Equal(t, ServiceStatusRunning, p.status["one"], "status not set")
+ assert.Equal(t, ServiceStatusUnknown, p.status["two"], "status set")
+}
+
+func TestRegisterOverwriteStatus(t *testing.T) {
+ p := &Probe{}
+
+ p.RegisterService("one", "two")
+ p.UpdateStatus("one", ServiceStatusRunning)
+
+ assert.Equal(t, ServiceStatusRunning, p.status["one"], "status not set")
+ assert.Equal(t, ServiceStatusUnknown, p.status["two"], "status set")
+
+ p.RegisterService("one", "three")
+ assert.Equal(t, 3, len(p.status), "wrong number of services")
+ assert.Equal(t, ServiceStatusRunning, p.status["one"], "status overridden")
+ assert.Equal(t, ServiceStatusUnknown, p.status["two"], "status set")
+ assert.Equal(t, ServiceStatusUnknown, p.status["three"], "status set")
+}
+
+func TestDetailzWithServices(t *testing.T) {
+ p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysTrue)
+ p.RegisterService("one", "two")
+
+ req := httptest.NewRequest("GET", "http://example.com/detailz", nil)
+ w := httptest.NewRecorder()
+ p.detailzFunc(w, req)
+ resp := w.Result()
+ body, _ := ioutil.ReadAll(resp.Body)
+
+ assert.Equal(t, http.StatusOK, resp.StatusCode, "invalid status code for no services")
+ assert.Equal(t, "application/json", resp.Header.Get("Content-Type"), "wrong content type")
+ var vals map[string]string
+ err := json.Unmarshal(body, &vals)
+ assert.Nil(t, err, "unable to unmarshal values")
+ assert.Equal(t, "Unknown", vals["one"], "wrong value")
+ assert.Equal(t, "Unknown", vals["two"], "wrong value")
+}
+
+func TestReadzNoServices(t *testing.T) {
+ p := (&Probe{}).WithReadyFunc(AlwaysTrue)
+ req := httptest.NewRequest("GET", "http://example.com/readz", nil)
+ w := httptest.NewRecorder()
+ p.readzFunc(w, req)
+ resp := w.Result()
+
+ assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code for no services")
+}
+
+func TestReadzWithServicesWithTrue(t *testing.T) {
+ p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysTrue)
+ p.RegisterService("one", "two")
+
+ req := httptest.NewRequest("GET", "http://example.com/readz", nil)
+ w := httptest.NewRecorder()
+ p.readzFunc(w, req)
+ resp := w.Result()
+ assert.Equal(t, http.StatusOK, resp.StatusCode, "invalid status code for registered only services")
+}
+
+func TestReadzWithServicesWithDefault(t *testing.T) {
+ p := &Probe{}
+ p.RegisterService("one", "two")
+
+ req := httptest.NewRequest("GET", "http://example.com/readz", nil)
+ w := httptest.NewRecorder()
+ p.readzFunc(w, req)
+ resp := w.Result()
+ assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code for registered only services")
+}
+
+func TestReadzNoServicesDefault(t *testing.T) {
+ p := &Probe{}
+
+ req := httptest.NewRequest("GET", "http://example.com/readz", nil)
+ w := httptest.NewRecorder()
+ p.readzFunc(w, req)
+ resp := w.Result()
+ assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code")
+}
+
+func TestReadzWithServicesDefault(t *testing.T) {
+ p := &Probe{}
+ p.RegisterService("one", "two")
+ p.UpdateStatus("one", ServiceStatusRunning)
+ p.UpdateStatus("two", ServiceStatusRunning)
+
+ req := httptest.NewRequest("GET", "http://example.com/readz", nil)
+ w := httptest.NewRecorder()
+ p.readzFunc(w, req)
+ resp := w.Result()
+ assert.Equal(t, http.StatusOK, resp.StatusCode, "invalid status code")
+}
+
+func TestReadzWithServicesDefaultOne(t *testing.T) {
+ p := &Probe{}
+ p.RegisterService("one", "two")
+ p.UpdateStatus("one", ServiceStatusRunning)
+
+ req := httptest.NewRequest("GET", "http://example.com/readz", nil)
+ w := httptest.NewRecorder()
+ p.readzFunc(w, req)
+ resp := w.Result()
+ assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code")
+}
+
+func TestHealthzNoServices(t *testing.T) {
+ p := (&Probe{}).WithReadyFunc(AlwaysTrue)
+ req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
+ w := httptest.NewRecorder()
+ p.healthzFunc(w, req)
+ resp := w.Result()
+
+ assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code for no services")
+}
+
+func TestHealthzWithServicesWithTrue(t *testing.T) {
+ p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysTrue)
+ p.RegisterService("one", "two")
+
+ req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
+ w := httptest.NewRecorder()
+ p.healthzFunc(w, req)
+ resp := w.Result()
+ assert.Equal(t, http.StatusOK, resp.StatusCode, "invalid status code for registered only services")
+}
+
+func TestHealthzWithServicesWithDefault(t *testing.T) {
+ p := &Probe{}
+ p.RegisterService("one", "two")
+
+ req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
+ w := httptest.NewRecorder()
+ p.healthzFunc(w, req)
+ resp := w.Result()
+ assert.Equal(t, http.StatusOK, resp.StatusCode, "invalid status code for registered only services")
+}
+
+func TestHealthzNoServicesDefault(t *testing.T) {
+ p := &Probe{}
+
+ req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
+ w := httptest.NewRecorder()
+ p.healthzFunc(w, req)
+ resp := w.Result()
+ assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code")
+}
+
+func TestHealthzWithServicesDefault(t *testing.T) {
+ p := &Probe{}
+ p.RegisterService("one", "two")
+ p.UpdateStatus("one", ServiceStatusRunning)
+ p.UpdateStatus("two", ServiceStatusRunning)
+
+ req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
+ w := httptest.NewRecorder()
+ p.healthzFunc(w, req)
+ resp := w.Result()
+ assert.Equal(t, http.StatusOK, resp.StatusCode, "invalid status code")
+}
+
+func TestHealthzWithServicesDefaultFailed(t *testing.T) {
+ p := &Probe{}
+ p.RegisterService("one", "two")
+ p.UpdateStatus("one", ServiceStatusFailed)
+
+ req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
+ w := httptest.NewRecorder()
+ p.healthzFunc(w, req)
+ resp := w.Result()
+ assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code")
+}
+
+func TestSetFuncsToNil(t *testing.T) {
+ p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysFalse)
+ p.WithReadyFunc(nil).WithHealthFunc(nil)
+ assert.Nil(t, p.readyFunc, "ready func not reset to nil")
+ assert.Nil(t, p.healthFunc, "health func not reset to nil")
+}
+
+func TestUpdateStatusFromContext(t *testing.T) {
+ p := &Probe{}
+ p.RegisterService("one")
+ ctx := context.WithValue(context.Background(), ProbeContextKey, p)
+ UpdateStatusFromContext(ctx, "one", ServiceStatusRunning)
+
+ assert.Equal(t, 1, len(p.status), "wrong number of services")
+ _, ok := p.status["one"]
+ assert.True(t, ok, "unable to find registered service")
+ assert.Equal(t, ServiceStatusRunning, p.status["one"], "status not set correctly from context")
+
+}
+
+func TestUpdateStatusFromNilContext(t *testing.T) {
+ p := &Probe{}
+ p.RegisterService("one")
+ UpdateStatusFromContext(nil, "one", ServiceStatusRunning)
+
+ assert.Equal(t, 1, len(p.status), "wrong number of services")
+ _, ok := p.status["one"]
+ assert.True(t, ok, "unable to find registered service")
+ assert.Equal(t, ServiceStatusUnknown, p.status["one"], "status not set correctly from context")
+
+}
+
+func TestUpdateStatusFromContextWithoutProbe(t *testing.T) {
+ p := &Probe{}
+ p.RegisterService("one")
+ ctx := context.Background()
+ UpdateStatusFromContext(ctx, "one", ServiceStatusRunning)
+
+ assert.Equal(t, 1, len(p.status), "wrong number of services")
+ _, ok := p.status["one"]
+ assert.True(t, ok, "unable to find registered service")
+ assert.Equal(t, ServiceStatusUnknown, p.status["one"], "status not set correctly from context")
+
+}
+
+func TestUpdateStatusFromContextWrongType(t *testing.T) {
+ p := &Probe{}
+ p.RegisterService("one")
+ ctx := context.WithValue(context.Background(), ProbeContextKey, "Teapot")
+ UpdateStatusFromContext(ctx, "one", ServiceStatusRunning)
+
+ assert.Equal(t, 1, len(p.status), "wrong number of services")
+ _, ok := p.status["one"]
+ assert.True(t, ok, "unable to find registered service")
+ assert.Equal(t, ServiceStatusUnknown, p.status["one"], "status not set correctly from context")
+}
+
+func TestUpdateStatusNoRegistered(t *testing.T) {
+ p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysFalse)
+
+ p.UpdateStatus("one", ServiceStatusRunning)
+ assert.Equal(t, 1, len(p.status), "wrong number of services")
+ _, ok := p.status["one"]
+ assert.True(t, ok, "unable to find registered service")
+ assert.Equal(t, ServiceStatusRunning, p.status["one"], "status not set correctly from context")
+}
diff --git a/pkg/common/techprofile/4QueueHybridProfileMap1.json b/pkg/common/techprofile/4QueueHybridProfileMap1.json
new file mode 100644
index 0000000..d11f8e4
--- /dev/null
+++ b/pkg/common/techprofile/4QueueHybridProfileMap1.json
@@ -0,0 +1,141 @@
+ {
+ "name": "4QueueHybridProfileMap1",
+ "profile_type": "XPON",
+ "version": 1,
+ "num_gem_ports": 4,
+ "instance_control": {
+ "onu": "multi-instance",
+ "uni": "single-instance",
+ "max_gem_payload_size": "auto"
+ },
+ "us_scheduler": {
+ "additional_bw": "AdditionalBW_Auto",
+ "direction": "UPSTREAM",
+ "priority": 0,
+ "weight": 0,
+ "q_sched_policy": "Hybrid"
+ },
+ "ds_scheduler": {
+ "additional_bw": "AdditionalBW_Auto",
+ "direction": "DOWNSTREAM",
+ "priority": 0,
+ "weight": 0,
+ "q_sched_policy": "Hybrid"
+ },
+ "upstream_gem_port_attribute_list": [
+ {
+ "pbit_map": "0b00000101",
+ "aes_encryption": "True",
+ "scheduling_policy": "WRR",
+ "priority_q": 4,
+ "weight": 25,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "max_threshold": 0,
+ "min_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b00011010",
+ "aes_encryption": "True",
+ "scheduling_policy": "WRR",
+ "priority_q": 3,
+ "weight": 75,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b00100000",
+ "aes_encryption": "True",
+ "scheduling_policy": "StrictPriority",
+ "priority_q": 2,
+ "weight": 0,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b11000000",
+ "aes_encryption": "True",
+ "scheduling_policy": "StrictPriority",
+ "priority_q": 1,
+ "weight": 25,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ }
+ ],
+ "downstream_gem_port_attribute_list": [
+ {
+ "pbit_map": "0b00000101",
+ "aes_encryption": "True",
+ "scheduling_policy": "WRR",
+ "priority_q": 4,
+ "weight": 10,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b00011010",
+ "aes_encryption": "True",
+ "scheduling_policy": "WRR",
+ "priority_q": 3,
+ "weight": 90,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b00100000",
+ "aes_encryption": "True",
+ "scheduling_policy": "StrictPriority",
+ "priority_q": 2,
+ "weight": 0,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b11000000",
+ "aes_encryption": "True",
+ "scheduling_policy": "StrictPriority",
+ "priority_q": 1,
+ "weight": 25,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ }
+ ]
+}
diff --git a/pkg/common/techprofile/README.md b/pkg/common/techprofile/README.md
new file mode 100644
index 0000000..03a396d
--- /dev/null
+++ b/pkg/common/techprofile/README.md
@@ -0,0 +1,336 @@
+# Technology Profile Management
+## Overview
+Technology profiles that are utilized by VOLTHA are stored in a prescribed structure in VOLTHA's key/value store, which is currently etcd. The key structure used to access technology profiles is /voltha/technology_profiles/<technology>/<TID>, where TID is the numeric ID of the technology profile and technology specifies the technology being utilized by the adapter, e.g. xgspon. While the technology key is a directory, the TID key should be set to the JSON data that represents the technology profile values.
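+
+For example, with the key prefix used by the commands later in this document, the xgspon profile with TID 64 is stored at service/voltha/technology_profiles/xgspon/64.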
+
+NOTE: The content of a technology profile represents a contract between the technology profile definition and all adapters that consume that technology profile. The structure and content of the profiles are outside the scope of Technology Profile Management. Technology profile management only specifies the key/value structure in which profiles are stored.
+
+Example JSON:
+
+{
+ "name": "4QueueHybridProfileMap1",
+ "profile_type": "XPON",
+ "version": 1,
+ "num_gem_ports": 4,
+ "instance_control": {
+ "onu": "multi-instance",
+ "uni": "single-instance",
+ "max_gem_payload_size": "auto"
+ },
+ "us_scheduler": {
+ "additional_bw": "auto",
+ "direction": "UPSTREAM",
+ "priority": 0,
+ "weight": 0,
+ "q_sched_policy": "hybrid"
+ },
+ "ds_scheduler": {
+ "additional_bw": "auto",
+ "direction": "DOWNSTREAM",
+ "priority": 0,
+ "weight": 0,
+ "q_sched_policy": "hybrid"
+ },
+ "upstream_gem_port_attribute_list": [
+ {
+ "pbit_map": "0b00000101",
+ "aes_encryption": "True",
+ "scheduling_policy": "WRR",
+ "priority_q": 4,
+ "weight": 25,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "max_threshold": 0,
+ "min_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b00011010",
+ "aes_encryption": "True",
+ "scheduling_policy": "WRR",
+ "priority_q": 3,
+ "weight": 75,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b00100000",
+ "aes_encryption": "True",
+ "scheduling_policy": "StrictPriority",
+ "priority_q": 2,
+ "weight": 0,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b11000000",
+ "aes_encryption": "True",
+ "scheduling_policy": "StrictPriority",
+ "priority_q": 1,
+ "weight": 25,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ }
+ ],
+ "downstream_gem_port_attribute_list": [
+ {
+ "pbit_map": "0b00000101",
+ "aes_encryption": "True",
+ "scheduling_policy": "WRR",
+ "priority_q": 4,
+ "weight": 10,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b00011010",
+ "aes_encryption": "True",
+ "scheduling_policy": "WRR",
+ "priority_q": 3,
+ "weight": 90,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b00100000",
+ "aes_encryption": "True",
+ "scheduling_policy": "StrictPriority",
+ "priority_q": 2,
+ "weight": 0,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b11000000",
+ "aes_encryption": "True",
+ "scheduling_policy": "StrictPriority",
+ "priority_q": 1,
+ "weight": 25,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ }
+ ]
+}
+
+## Creating Technology Profiles
+Technology profiles are simple JSON objects. Such a JSON object can be created using a variety of tools such as Vim, Emacs, or various IDEs. JQ can be a useful tool for validating a JSON object. Once a file is created with the JSON object it can be stored in the VOLTHA key/value store using the standard etcd command line tool etcdctl or using an HTTP POST operation using Curl.
+
+Assuming you are in a standard VOLTHA deployment within a Kubernetes cluster you can access the etcd key/value store using kubectl via the PODs named etcd-cluster-0000, etcd-cluster-0001, or etcd-cluster-0002. For the examples in this document etcd-cluster-0000 will be used, but it really shouldn't matter which is used.
+
+The techprofile module uses etcd version 3. Export this variable before running the etcdctl examples below: export ETCDCTL_API=3
+
+Assuming the Technology template is stored in a local file 4QueueHybridProfileMap1.json the following commands could be used to store or update the technical template into the proper location in the etcd key/value store:
+
+# Store a Technology template using etcdctl
+jq -c . 4QueueHybridProfileMap1.json | kubectl exec -i etcd-cluster-0000 -- etcdctl set service/voltha/technology_profiles/xgspon/64
+
+jq -c . 4QueueHybridProfileMap1.json | etcdctl --endpoints=<ETCDIP>:2379 put service/voltha/technology_profiles/xgspon/64
+
+
+# Store a Technology template using curl
+curl -sSL -XPUT http://10.233.53.161:2379/v2/keys/service/voltha/technology_profiles/xgspon/64 -d value="$(jq -c . 4QueueHybridProfileMap1.json)"
+In the examples above, the command jq is used. This command can be installed using standard package management tools on most Linux systems. In the examples the "-c" option is used to compress the JSON. Using this tool is not necessary, and if you choose not to use the tool, you can replace "jq -c ." in the above examples with the "cat" command. More on jq can be found at https://stedolan.github.io/jq/.
+
+## Listing Technology Profiles for a given Technology
+While both curl and etcdctl (via kubectl) can be used to list or view the available Technology profiles, etcdctl is easier, and thus will be used in the examples. For listing Technology profiles etcdctl ls is used. It can be used in conjunction with the -r option to recursively list profiles.
+
+
+# List Tech profile
+etcdctl --endpoints=<EtcdIPAddres>:2379 get service/voltha/technology_profiles/xgspon/64
+
+
+A specified Technology profile can be viewed with the etcdctl get command. (Again, jq is used for presentation purposes, and is not required)
+
+# Display a specified Technology profile, using jq to pretty print
+kubectl exec -i etcd-cluster-0000 -- etcdctl get /xgspon/64 | jq .
+
+etcdctl --endpoints=<ETCDIP>:2379 get service/voltha/technology_profiles/xgspon/64
+# Example output
+service/voltha/technology_profiles/xgspon/64/uni-1
+{
+ "name": "4QueueHybridProfileMap1",
+ "profile_type": "XPON",
+ "version": 1,
+ "num_gem_ports": 4,
+ "instance_control": {
+ "onu": "multi-instance",
+ "uni": "single-instance",
+ "max_gem_payload_size": "auto"
+ },
+ "us_scheduler": {
+ "additional_bw": "auto",
+ "direction": "UPSTREAM",
+ "priority": 0,
+ "weight": 0,
+ "q_sched_policy": "hybrid"
+ },
+ "ds_scheduler": {
+ "additional_bw": "auto",
+ "direction": "DOWNSTREAM",
+ "priority": 0,
+ "weight": 0,
+ "q_sched_policy": "hybrid"
+ },
+ "upstream_gem_port_attribute_list": [
+ {
+ "pbit_map": "0b00000101",
+ "aes_encryption": "True",
+ "scheduling_policy": "WRR",
+ "priority_q": 4,
+ "weight": 25,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "max_threshold": 0,
+ "min_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b00011010",
+ "aes_encryption": "True",
+ "scheduling_policy": "WRR",
+ "priority_q": 3,
+ "weight": 75,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b00100000",
+ "aes_encryption": "True",
+ "scheduling_policy": "StrictPriority",
+ "priority_q": 2,
+ "weight": 0,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b11000000",
+ "aes_encryption": "True",
+ "scheduling_policy": "StrictPriority",
+ "priority_q": 1,
+ "weight": 25,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ }
+ ],
+ "downstream_gem_port_attribute_list": [
+ {
+ "pbit_map": "0b00000101",
+ "aes_encryption": "True",
+ "scheduling_policy": "WRR",
+ "priority_q": 4,
+ "weight": 10,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b00011010",
+ "aes_encryption": "True",
+ "scheduling_policy": "WRR",
+ "priority_q": 3,
+ "weight": 90,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b00100000",
+ "aes_encryption": "True",
+ "scheduling_policy": "StrictPriority",
+ "priority_q": 2,
+ "weight": 0,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ },
+ {
+ "pbit_map": "0b11000000",
+ "aes_encryption": "True",
+ "scheduling_policy": "StrictPriority",
+ "priority_q": 1,
+ "weight": 25,
+ "discard_policy": "TailDrop",
+ "max_q_size": "auto",
+ "discard_config": {
+ "min_threshold": 0,
+ "max_threshold": 0,
+ "max_probability": 0
+ }
+ }
+ ]
+}
+
+## Deleting Technology Profiles
+A technology profile or a technology profile tree can be removed using etcdctl rm.
+
+# Remove a specific technology profile
+kubectl exec -i etcd-cluster-0000 -- etcdctl rm /xgspon/64
+
+# Remove all technology profiles associated with Technology xgspon and ID 64 (including the profile ID key)
+kubectl exec -i etcd-cluster-0000 -- etcdctl rm --dir -r /xgspon/64
diff --git a/pkg/common/techprofile/config.go b/pkg/common/techprofile/config.go
new file mode 100644
index 0000000..f9655a8
--- /dev/null
+++ b/pkg/common/techprofile/config.go
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package techprofile
+
+import (
+ "github.com/opencord/voltha-lib-go/pkg/db/model"
+)
+
+// tech profile default constants
+const (
+ defaultTechProfileName = "Default_1tcont_1gem_Profile"
+ DEFAULT_TECH_PROFILE_TABLE_ID = 64
+ defaultVersion = 1.0
+ defaultLogLevel = 0
+ defaultGemportsCount = 1
+ defaultNumTconts = 1
+ defaultPbits = "0b11111111"
+
+ defaultKVStoreType = "etcd"
+ defaultKVStoreTimeout = 5 //in seconds
+ defaultKVStoreHost = "127.0.0.1"
+ defaultKVStorePort = 2379 // Consul = 8500; Etcd = 2379
+
+ // Tech profile path prefix in kv store
+ defaultKVPathPrefix = "service/voltha/technology_profiles"
+
+ // Tech profile path in kv store
+ defaultTechProfileKVPath = "%s/%d" // <technology>/<tech_profile_tableID>
+
+ // Tech profile instance path in kv store
+ // Format: <technology>/<tech_profile_tableID>/<uni_port_name>
+ defaultTPInstanceKVPath = "%s/%d/%s"
+)
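+
+// Illustrative example of the two formats above: technology "xgspon", table
+// ID 64 and an assumed uni port name "olt-1-pon-0/onu-1/uni-0" yield the keys
+// "xgspon/64" and "xgspon/64/olt-1-pon-0/onu-1/uni-0", which the KV backend
+// stores under TPKVPathPrefix.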
+
+//Tech-Profile JSON String Keys
+// NOTE: Tech profile template JSON file should comply with the keys below
+const (
+ NAME = "name"
+ PROFILE_TYPE = "profile_type"
+ VERSION = "version"
+ NUM_GEM_PORTS = "num_gem_ports"
+ INSTANCE_CONTROL = "instance_control"
+ US_SCHEDULER = "us_scheduler"
+ DS_SCHEDULER = "ds_scheduler"
+ UPSTREAM_GEM_PORT_ATTRIBUTE_LIST = "upstream_gem_port_attribute_list"
+ DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST = "downstream_gem_port_attribute_list"
+ ONU = "onu"
+ UNI = "uni"
+ MAX_GEM_PAYLOAD_SIZE = "max_gem_payload_size"
+ DIRECTION = "direction"
+ ADDITIONAL_BW = "additional_bw"
+ PRIORITY = "priority"
+ Q_SCHED_POLICY = "q_sched_policy"
+ WEIGHT = "weight"
+ PBIT_MAP = "pbit_map"
+ DISCARD_CONFIG = "discard_config"
+ MAX_THRESHOLD = "max_threshold"
+ MIN_THRESHOLD = "min_threshold"
+ MAX_PROBABILITY = "max_probability"
+ DISCARD_POLICY = "discard_policy"
+ PRIORITY_Q = "priority_q"
+ SCHEDULING_POLICY = "scheduling_policy"
+ MAX_Q_SIZE = "max_q_size"
+ AES_ENCRYPTION = "aes_encryption"
+)
+
+// TechProfileFlags represents the set of configurations used by the tech profile module
+type TechProfileFlags struct {
+ KVStoreHost string
+ KVStorePort int
+ KVStoreType string
+ KVStoreTimeout int
+ KVBackend *model.Backend
+ TPKVPathPrefix string
+ TPFileKVPath string
+ TPInstanceKVPath string
+ DefaultTPName string
+ TPVersion int
+ NumGemPorts uint32
+ NumTconts uint32
+ DefaultPbits []string
+ LogLevel int
+ DefaultTechProfileID uint32
+ DefaultNumGemPorts uint32
+ DefaultNumTconts uint32
+}
+
+func NewTechProfileFlags(KVStoreType string, KVStoreHost string, KVStorePort int) *TechProfileFlags {
+ // initialize with default values
+ var techProfileFlags = TechProfileFlags{
+ KVBackend: nil,
+ KVStoreHost: KVStoreHost,
+ KVStorePort: KVStorePort,
+ KVStoreType: KVStoreType,
+ KVStoreTimeout: defaultKVStoreTimeout,
+ DefaultTPName: defaultTechProfileName,
+ TPKVPathPrefix: defaultKVPathPrefix,
+ TPVersion: defaultVersion,
+ TPFileKVPath: defaultTechProfileKVPath,
+ TPInstanceKVPath: defaultTPInstanceKVPath,
+ DefaultTechProfileID: DEFAULT_TECH_PROFILE_TABLE_ID,
+ DefaultNumGemPorts: defaultGemportsCount,
+ DefaultNumTconts: defaultNumTconts,
+ DefaultPbits: []string{defaultPbits},
+ LogLevel: defaultLogLevel,
+ }
+
+ return &techProfileFlags
+}
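+
+// Illustrative sketch: flags built against a local etcd, with everything else
+// left at the defaults above.
+//
+//  flags := NewTechProfileFlags("etcd", "127.0.0.1", 2379)
+//  // flags.TPKVPathPrefix == "service/voltha/technology_profiles"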
diff --git a/pkg/common/techprofile/tech_profile.go b/pkg/common/techprofile/tech_profile.go
new file mode 100644
index 0000000..582fecc
--- /dev/null
+++ b/pkg/common/techprofile/tech_profile.go
@@ -0,0 +1,692 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package techprofile
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+
+ "github.com/opencord/voltha-lib-go/pkg/common/log"
+ "github.com/opencord/voltha-lib-go/pkg/db/kvstore"
+ "github.com/opencord/voltha-lib-go/pkg/db/model"
+ tp_pb "github.com/opencord/voltha-protos/go/tech_profile"
+)
+
+// Interface to pon resource manager APIs
+type iPonResourceMgr interface {
+ GetResourceID(IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error)
+ GetResourceTypeAllocID() string
+ GetResourceTypeGemPortID() string
+ GetTechnology() string
+}
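+
+// Illustrative note: the PON resource manager earlier in this library provides
+// these four methods, so it satisfies the interface; a hypothetical test
+// double needs to stub only the same four.
+//
+//  type fakeRM struct{}
+//
+//  func (fakeRM) GetResourceID(uint32, string, uint32) ([]uint32, error) { return []uint32{1}, nil }
+//  func (fakeRM) GetResourceTypeAllocID() string   { return "ALLOC_ID" }
+//  func (fakeRM) GetResourceTypeGemPortID() string { return "GEMPORT_ID" }
+//  func (fakeRM) GetTechnology() string            { return "xgspon" }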
+
+type Direction int32
+
+const (
+ Direction_UPSTREAM Direction = 0
+ Direction_DOWNSTREAM Direction = 1
+ Direction_BIDIRECTIONAL Direction = 2
+)
+
+var Direction_name = map[Direction]string{
+ 0: "UPSTREAM",
+ 1: "DOWNSTREAM",
+ 2: "BIDIRECTIONAL",
+}
+
+type SchedulingPolicy int32
+
+const (
+ SchedulingPolicy_WRR SchedulingPolicy = 0
+ SchedulingPolicy_StrictPriority SchedulingPolicy = 1
+ SchedulingPolicy_Hybrid SchedulingPolicy = 2
+)
+
+var SchedulingPolicy_name = map[SchedulingPolicy]string{
+ 0: "WRR",
+ 1: "StrictPriority",
+ 2: "Hybrid",
+}
+
+type AdditionalBW int32
+
+const (
+ AdditionalBW_AdditionalBW_None AdditionalBW = 0
+ AdditionalBW_AdditionalBW_NA AdditionalBW = 1
+ AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
+ AdditionalBW_AdditionalBW_Auto AdditionalBW = 3
+)
+
+var AdditionalBW_name = map[AdditionalBW]string{
+ 0: "AdditionalBW_None",
+ 1: "AdditionalBW_NA",
+ 2: "AdditionalBW_BestEffort",
+ 3: "AdditionalBW_Auto",
+}
+
+type DiscardPolicy int32
+
+const (
+ DiscardPolicy_TailDrop DiscardPolicy = 0
+ DiscardPolicy_WTailDrop DiscardPolicy = 1
+ DiscardPolicy_Red DiscardPolicy = 2
+ DiscardPolicy_WRed DiscardPolicy = 3
+)
+
+var DiscardPolicy_name = map[DiscardPolicy]string{
+ 0: "TailDrop",
+ 1: "WTailDrop",
+ 2: "Red",
+ 3: "WRed",
+}
+
+/*
+type InferredAdditionBWIndication int32
+
+const (
+ InferredAdditionBWIndication_InferredAdditionBWIndication_None InferredAdditionBWIndication = 0
+ InferredAdditionBWIndication_InferredAdditionBWIndication_Assured InferredAdditionBWIndication = 1
+ InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort InferredAdditionBWIndication = 2
+)
+
+var InferredAdditionBWIndication_name = map[int32]string{
+ 0: "InferredAdditionBWIndication_None",
+ 1: "InferredAdditionBWIndication_Assured",
+ 2: "InferredAdditionBWIndication_BestEffort",
+}
+*/
+// instance control defaults
+const (
+ defaultOnuInstance = "multi-instance"
+ defaultUniInstance = "single-instance"
+ defaultNumGemPorts = 1
+ defaultGemPayloadSize = "auto"
+)
+
+const MAX_GEM_PAYLOAD = "max_gem_payload_size"
+
+type InstanceControl struct {
+ Onu string `json:"ONU"`
+ Uni string `json:"uni"`
+ MaxGemPayloadSize string `json:"max_gem_payload_size"`
+}
+
+// default discard config constants
+const (
+ defaultMinThreshold = 0
+ defaultMaxThreshold = 0
+ defaultMaxProbability = 0
+)
+
+type DiscardConfig struct {
+ MinThreshold int `json:"min_threshold"`
+ MaxThreshold int `json:"max_threshold"`
+ MaxProbability int `json:"max_probability"`
+}
+
+// default scheduler constants
+const (
+ defaultAdditionalBw = AdditionalBW_AdditionalBW_BestEffort
+ defaultPriority = 0
+ defaultWeight = 0
+ defaultQueueSchedPolicy = SchedulingPolicy_Hybrid
+)
+
+type Scheduler struct {
+ Direction string `json:"direction"`
+ AdditionalBw string `json:"additional_bw"`
+ Priority uint32 `json:"priority"`
+ Weight uint32 `json:"weight"`
+ QSchedPolicy string `json:"q_sched_policy"`
+}
+
+// default GEM attribute constants
+const (
+ defaultAESEncryption = "True"
+ defaultPriorityQueue = 0
+ defaultQueueWeight = 0
+ defaultMaxQueueSize = "auto"
+ defaultdropPolicy = DiscardPolicy_TailDrop
+ defaultSchedulePolicy = SchedulingPolicy_WRR
+)
+
+type GemPortAttribute struct {
+ MaxQueueSize string `json:"max_q_size"`
+ PbitMap string `json:"pbit_map"`
+ AesEncryption string `json:"aes_encryption"`
+ SchedulingPolicy string `json:"scheduling_policy"`
+ PriorityQueue uint32 `json:"priority_q"`
+ Weight uint32 `json:"weight"`
+ DiscardPolicy string `json:"discard_policy"`
+ DiscardConfig DiscardConfig `json:"discard_config"`
+}
+
+type iScheduler struct {
+ AllocID uint32 `json:"alloc_id"`
+ Direction string `json:"direction"`
+ AdditionalBw string `json:"additional_bw"`
+ Priority uint32 `json:"priority"`
+ Weight uint32 `json:"weight"`
+ QSchedPolicy string `json:"q_sched_policy"`
+}
+type iGemPortAttribute struct {
+ GemportID uint32 `json:"gemport_id"`
+ MaxQueueSize string `json:"max_q_size"`
+ PbitMap string `json:"pbit_map"`
+ AesEncryption string `json:"aes_encryption"`
+ SchedulingPolicy string `json:"scheduling_policy"`
+ PriorityQueue uint32 `json:"priority_q"`
+ Weight uint32 `json:"weight"`
+ DiscardPolicy string `json:"discard_policy"`
+ DiscardConfig DiscardConfig `json:"discard_config"`
+}
+
+type TechProfileMgr struct {
+ config *TechProfileFlags
+ resourceMgr iPonResourceMgr
+}
+type DefaultTechProfile struct {
+ Name string `json:"name"`
+ ProfileType string `json:"profile_type"`
+ Version int `json:"version"`
+ NumGemPorts uint32 `json:"num_gem_ports"`
+ InstanceCtrl InstanceControl `json:"instance_control"`
+ UsScheduler Scheduler `json:"us_scheduler"`
+ DsScheduler Scheduler `json:"ds_scheduler"`
+ UpstreamGemPortAttributeList []GemPortAttribute `json:"upstream_gem_port_attribute_list"`
+ DownstreamGemPortAttributeList []GemPortAttribute `json:"downstream_gem_port_attribute_list"`
+}
+type TechProfile struct {
+ Name string `json:"name"`
+ SubscriberIdentifier string `json:"subscriber_identifier"`
+ ProfileType string `json:"profile_type"`
+ Version int `json:"version"`
+ NumGemPorts uint32 `json:"num_gem_ports"`
+ NumTconts uint32 `json:"num_of_tconts"`
+ InstanceCtrl InstanceControl `json:"instance_control"`
+ UsScheduler iScheduler `json:"us_scheduler"`
+ DsScheduler iScheduler `json:"ds_scheduler"`
+ UpstreamGemPortAttributeList []iGemPortAttribute `json:"upstream_gem_port_attribute_list"`
+ DownstreamGemPortAttributeList []iGemPortAttribute `json:"downstream_gem_port_attribute_list"`
+}
+
+func (t *TechProfileMgr) SetKVClient() *model.Backend {
+ addr := t.config.KVStoreHost + ":" + strconv.Itoa(t.config.KVStorePort)
+ kvClient, err := newKVClient(t.config.KVStoreType, addr, t.config.KVStoreTimeout)
+ if err != nil {
+ log.Errorw("failed-to-create-kv-client",
+ log.Fields{
+ "type": t.config.KVStoreType, "host": t.config.KVStoreHost, "port": t.config.KVStorePort,
+ "timeout": t.config.KVStoreTimeout, "prefix": t.config.TPKVPathPrefix,
+ "error": err.Error(),
+ })
+ return nil
+ }
+ return &model.Backend{
+ Client: kvClient,
+ StoreType: t.config.KVStoreType,
+ Host: t.config.KVStoreHost,
+ Port: t.config.KVStorePort,
+ Timeout: t.config.KVStoreTimeout,
+ PathPrefix: t.config.TPKVPathPrefix}
+
+ /* TODO: Verify that a direct call to NewBackend works with the backend; currently there is an
+ issue between the KV store and the backend, so the core does not call NewBackend directly:
+ kv := model.NewBackend(t.config.KVStoreType, t.config.KVStoreHost, t.config.KVStorePort,
+ t.config.KVStoreTimeout, kvStoreTechProfilePathPrefix)
+ */
+}
+
+func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
+
+ log.Infow("kv-store", log.Fields{"storeType": storeType, "address": address})
+ switch storeType {
+ case "consul":
+ return kvstore.NewConsulClient(address, timeout)
+ case "etcd":
+ return kvstore.NewEtcdClient(address, timeout)
+ }
+ return nil, errors.New("unsupported-kv-store")
+}
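+
+/*
+Illustrative usage (the address and timeout values are assumptions), e.g.
+
+ kvClient, err := newKVClient("etcd", "127.0.0.1:2379", 5)
+ if err != nil {
+ log.Error(err)
+ }
+*/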
+
+func NewTechProfile(resourceMgr iPonResourceMgr, KVStoreType string, KVStoreHost string, KVStorePort int) (*TechProfileMgr, error) {
+ var techprofileObj TechProfileMgr
+ log.Debug("Initializing techprofile Manager")
+ techprofileObj.config = NewTechProfileFlags(KVStoreType, KVStoreHost, KVStorePort)
+ techprofileObj.config.KVBackend = techprofileObj.SetKVClient()
+ if techprofileObj.config.KVBackend == nil {
+ log.Error("Failed to initialize KV backend\n")
+ return nil, errors.New("KV backend init failed")
+ }
+ techprofileObj.resourceMgr = resourceMgr
+ log.Debug("Initializing techprofile object instance success")
+ return &techprofileObj, nil
+}
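+
+/*
+Typical usage, following the steps above (rsrcMgr is any iPonResourceMgr
+implementation; the KV store host and port are assumptions), e.g.
+
+ tpMgr, err := NewTechProfile(rsrcMgr, "etcd", "127.0.0.1", 2379)
+ if err != nil {
+ log.Error("tech profile manager initialization failed")
+ }
+*/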
+
+func (t *TechProfileMgr) GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string {
+ return fmt.Sprintf(t.config.TPInstanceKVPath, t.resourceMgr.GetTechnology(), techProfiletblID, uniPortName)
+}
+
+func (t *TechProfileMgr) GetTPInstanceFromKVStore(techProfiletblID uint32, path string) (*TechProfile, error) {
+ var kvTpIns TechProfile
+ log.Infow("Getting tech profile instance from KV store", log.Fields{"path": path})
+ kvresult, err := t.config.KVBackend.Get(path)
+ if err != nil {
+ log.Errorw("Error while fetching tech-profile instance from KV backend", log.Fields{"key": path, "error": err})
+ return nil, err
+ }
+ if kvresult == nil {
+ log.Infow("Tech profile instance does not exist in KV store", log.Fields{"key": path})
+ return nil, nil
+ }
+ value, err := kvstore.ToByte(kvresult.Value)
+ if err != nil {
+ log.Errorw("Error while converting KV result to bytes", log.Fields{"key": path, "error": err})
+ return nil, err
+ }
+ // Propagate unmarshal failures instead of silently returning a partially populated instance
+ if err = json.Unmarshal(value, &kvTpIns); err != nil {
+ log.Errorw("Error while unmarshaling KV result", log.Fields{"key": path, "value": value, "error": err})
+ return nil, err
+ }
+ return &kvTpIns, nil
+}
+
+func (t *TechProfileMgr) addTechProfInstanceToKVStore(techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error {
+ path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+ log.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
+ tpInstanceJson, err := json.Marshal(*tpInstance)
+ if err == nil {
+ // Backend will convert JSON byte array into string format
+ log.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
+ err = t.config.KVBackend.Put(path, tpInstanceJson)
+ } else {
+ log.Errorw("Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
+ }
+ return err
+}
+func (t *TechProfileMgr) getTPFromKVStore(techProfiletblID uint32) *DefaultTechProfile {
+ var kvtechprofile DefaultTechProfile
+ key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
+ log.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
+ kvresult, err := t.config.KVBackend.Get(key)
+ if err != nil {
+ log.Errorw("Error while fetching value from KV store", log.Fields{"key": key})
+ return nil
+ }
+ if kvresult != nil {
+ /* The backend returns Value as a string; it must be converted to []byte before unmarshaling */
+ if value, err := kvstore.ToByte(kvresult.Value); err == nil {
+ if err = json.Unmarshal(value, &kvtechprofile); err == nil {
+ log.Debugw("Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
+ return &kvtechprofile
+ }
+ }
+ }
+ return nil
+}
+func (t *TechProfileMgr) CreateTechProfInstance(techProfiletblID uint32, uniPortName string, intfId uint32) *TechProfile {
+ var tpInstance *TechProfile
+ log.Infow("Creating tech profile instance ", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
+ tp := t.getTPFromKVStore(techProfiletblID)
+ if tp != nil {
+ log.Infow("Creating tech profile instance with profile from KV store", log.Fields{"tpid": techProfiletblID})
+ } else {
+ tp = t.getDefaultTechProfile()
+ log.Infow("Creating tech profile instance with default values", log.Fields{"tpid": techProfiletblID})
+ }
+ tpInstance = t.allocateTPInstance(uniPortName, tp, intfId, t.config.DefaultNumTconts)
+ if err := t.addTechProfInstanceToKVStore(techProfiletblID, uniPortName, tpInstance); err != nil {
+ log.Errorw("Error in adding tech profile instance to KV ", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
+ return nil
+ }
+ log.Infow("Added tech profile instance to KV store successfully ",
+ log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
+ return tpInstance
+}
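+
+/*
+Illustrative flow (the table ID, UNI port name and interface ID are
+assumptions): create an instance, then read it back by its KV path, e.g.
+
+ tpInst := tpMgr.CreateTechProfInstance(64, "pon-0/onu-1/uni-0", 0)
+ path := tpMgr.GetTechProfileInstanceKVPath(64, "pon-0/onu-1/uni-0")
+ readBack, err := tpMgr.GetTPInstanceFromKVStore(64, path)
+*/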
+
+func (t *TechProfileMgr) DeleteTechProfileInstance(techProfiletblID uint32, uniPortName string) error {
+ path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+ return t.config.KVBackend.Delete(path)
+}
+
+func (t *TechProfileMgr) allocateTPInstance(uniPortName string, tp *DefaultTechProfile, intfId uint32, numOfTconts uint32) *TechProfile {
+
+ var usGemPortAttributeList []iGemPortAttribute
+ var dsGemPortAttributeList []iGemPortAttribute
+ var tcontIDs []uint32
+ var gemPorts []uint32
+ var err error
+
+ log.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numOfTconts": numOfTconts, "numGem": tp.NumGemPorts})
+ if numOfTconts > 1 {
+ log.Errorw("Multiple Tconts not supported currently", log.Fields{"uniPortName": uniPortName, "intfId": intfId})
+ return nil
+ }
+ if tcontIDs, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeAllocID(), numOfTconts); err != nil {
+ log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId, "numTconts": numOfTconts})
+ return nil
+ }
+ log.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
+ if gemPorts, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
+ log.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
+ return nil
+ }
+ log.Infow("Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
+ for index := 0; index < int(tp.NumGemPorts); index++ {
+ usGemPortAttributeList = append(usGemPortAttributeList,
+ iGemPortAttribute{GemportID: gemPorts[index],
+ MaxQueueSize: tp.UpstreamGemPortAttributeList[index].MaxQueueSize,
+ PbitMap: tp.UpstreamGemPortAttributeList[index].PbitMap,
+ AesEncryption: tp.UpstreamGemPortAttributeList[index].AesEncryption,
+ SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
+ PriorityQueue: tp.UpstreamGemPortAttributeList[index].PriorityQueue,
+ Weight: tp.UpstreamGemPortAttributeList[index].Weight,
+ DiscardPolicy: tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
+ DiscardConfig: tp.UpstreamGemPortAttributeList[index].DiscardConfig})
+ dsGemPortAttributeList = append(dsGemPortAttributeList,
+ iGemPortAttribute{GemportID: gemPorts[index],
+ MaxQueueSize: tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
+ PbitMap: tp.DownstreamGemPortAttributeList[index].PbitMap,
+ AesEncryption: tp.DownstreamGemPortAttributeList[index].AesEncryption,
+ SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+ PriorityQueue: tp.DownstreamGemPortAttributeList[index].PriorityQueue,
+ Weight: tp.DownstreamGemPortAttributeList[index].Weight,
+ DiscardPolicy: tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+ DiscardConfig: tp.DownstreamGemPortAttributeList[index].DiscardConfig})
+ }
+ return &TechProfile{
+ SubscriberIdentifier: uniPortName,
+ Name: tp.Name,
+ ProfileType: tp.ProfileType,
+ Version: tp.Version,
+ NumGemPorts: tp.NumGemPorts,
+ NumTconts: numOfTconts,
+ InstanceCtrl: tp.InstanceCtrl,
+ UsScheduler: iScheduler{
+ AllocID: tcontIDs[0],
+ Direction: tp.UsScheduler.Direction,
+ AdditionalBw: tp.UsScheduler.AdditionalBw,
+ Priority: tp.UsScheduler.Priority,
+ Weight: tp.UsScheduler.Weight,
+ QSchedPolicy: tp.UsScheduler.QSchedPolicy},
+ DsScheduler: iScheduler{
+ AllocID: tcontIDs[0],
+ Direction: tp.DsScheduler.Direction,
+ AdditionalBw: tp.DsScheduler.AdditionalBw,
+ Priority: tp.DsScheduler.Priority,
+ Weight: tp.DsScheduler.Weight,
+ QSchedPolicy: tp.DsScheduler.QSchedPolicy},
+ UpstreamGemPortAttributeList: usGemPortAttributeList,
+ DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
+
+func (t *TechProfileMgr) getDefaultTechProfile() *DefaultTechProfile {
+
+ var usGemPortAttributeList []GemPortAttribute
+ var dsGemPortAttributeList []GemPortAttribute
+
+ for _, pbit := range t.config.DefaultPbits {
+ log.Debugw("Creating GEM port", log.Fields{"pbit": pbit})
+ usGemPortAttributeList = append(usGemPortAttributeList,
+ GemPortAttribute{
+ MaxQueueSize: defaultMaxQueueSize,
+ PbitMap: pbit,
+ AesEncryption: defaultAESEncryption,
+ SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
+ PriorityQueue: defaultPriorityQueue,
+ Weight: defaultQueueWeight,
+ DiscardPolicy: DiscardPolicy_name[defaultDropPolicy],
+ DiscardConfig: DiscardConfig{
+ MinThreshold: defaultMinThreshold,
+ MaxThreshold: defaultMaxThreshold,
+ MaxProbability: defaultMaxProbability}})
+ dsGemPortAttributeList = append(dsGemPortAttributeList,
+ GemPortAttribute{
+ MaxQueueSize: defaultMaxQueueSize,
+ PbitMap: pbit,
+ AesEncryption: defaultAESEncryption,
+ SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
+ PriorityQueue: defaultPriorityQueue,
+ Weight: defaultQueueWeight,
+ DiscardPolicy: DiscardPolicy_name[defaultDropPolicy],
+ DiscardConfig: DiscardConfig{
+ MinThreshold: defaultMinThreshold,
+ MaxThreshold: defaultMaxThreshold,
+ MaxProbability: defaultMaxProbability}})
+ }
+ return &DefaultTechProfile{
+ Name: t.config.DefaultTPName,
+ ProfileType: t.resourceMgr.GetTechnology(),
+ Version: t.config.TPVersion,
+ NumGemPorts: uint32(len(usGemPortAttributeList)),
+ InstanceCtrl: InstanceControl{
+ Onu: defaultOnuInstance,
+ Uni: defaultUniInstance,
+ MaxGemPayloadSize: defaultGemPayloadSize},
+ UsScheduler: Scheduler{
+ Direction: Direction_name[Direction_UPSTREAM],
+ AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
+ Priority: defaultPriority,
+ Weight: defaultWeight,
+ QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
+ DsScheduler: Scheduler{
+ Direction: Direction_name[Direction_DOWNSTREAM],
+ AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
+ Priority: defaultPriority,
+ Weight: defaultWeight,
+ QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
+ UpstreamGemPortAttributeList: usGemPortAttributeList,
+ DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
+
+func (t *TechProfileMgr) GetprotoBufParamValue(paramType string, paramKey string) int32 {
+ var result int32 = -1
+
+ switch paramType {
+ case "direction":
+ if val, ok := tp_pb.Direction_value[paramKey]; ok {
+ result = val
+ }
+ case "discard_policy":
+ if val, ok := tp_pb.DiscardPolicy_value[paramKey]; ok {
+ result = val
+ }
+ case "sched_policy":
+ if val, ok := tp_pb.SchedulingPolicy_value[paramKey]; ok {
+ result = val
+ }
+ case "additional_bw":
+ if val, ok := tp_pb.AdditionalBW_value[paramKey]; ok {
+ result = val
+ }
+ default:
+ log.Errorw("Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
+ return -1
+ }
+ log.Debugw("Got value in proto", log.Fields{"key": paramKey, "value": result})
+ return result
+}
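+
+/*
+Illustrative lookups: the result is the numeric enum value defined in
+voltha-protos, or -1 when the parameter type or key is unknown, e.g.
+
+ t.GetprotoBufParamValue("direction", "UPSTREAM") // numeric tp_pb.Direction value
+ t.GetprotoBufParamValue("additional_bw", "AdditionalBW_BestEffort")
+ t.GetprotoBufParamValue("sched_policy", "no-such-policy") // -1
+*/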
+
+func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) *tp_pb.SchedulerConfig {
+ dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Direction))
+ if dir == -1 {
+ log.Fatal("Error in getting Proto for direction for upstream scheduler")
+ return nil
+ }
+ bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.AdditionalBw))
+ if bw == -1 {
+ log.Fatal("Error in getting Proto for bandwidth for upstream scheduler")
+ return nil
+ }
+ policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.QSchedPolicy))
+ if policy == -1 {
+ log.Fatal("Error in getting Proto for scheduling policy for upstream scheduler")
+ return nil
+ }
+ return &tp_pb.SchedulerConfig{
+ Direction: dir,
+ AdditionalBw: bw,
+ Priority: tpInstance.UsScheduler.Priority,
+ Weight: tpInstance.UsScheduler.Weight,
+ SchedPolicy: policy}
+}
+
+func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) *tp_pb.SchedulerConfig {
+
+ dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Direction))
+ if dir == -1 {
+ log.Fatal("Error in getting Proto for direction for downstream scheduler")
+ return nil
+ }
+ bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.AdditionalBw))
+ if bw == -1 {
+ log.Fatal("Error in getting Proto for bandwidth for downstream scheduler")
+ return nil
+ }
+ policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.QSchedPolicy))
+ if policy == -1 {
+ log.Fatal("Error in getting Proto for scheduling policy for downstream scheduler")
+ return nil
+ }
+
+ return &tp_pb.SchedulerConfig{
+ Direction: dir,
+ AdditionalBw: bw,
+ Priority: tpInstance.DsScheduler.Priority,
+ Weight: tpInstance.DsScheduler.Weight,
+ SchedPolicy: policy}
+}
+
+func (t *TechProfileMgr) GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
+ ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler {
+
+ tSched := &tp_pb.TrafficScheduler{
+ Direction: SchedCfg.Direction,
+ AllocId: tpInstance.UsScheduler.AllocID,
+ TrafficShapingInfo: ShapingCfg,
+ Scheduler: SchedCfg}
+
+ return tSched
+}
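+
+/*
+Illustrative composition (the shaping values are assumptions): build the
+upstream scheduler config, then wrap it into a traffic scheduler. Note that
+allocateTPInstance assigns the same AllocID to both schedulers, so the
+upstream AllocID used here holds for either direction, e.g.
+
+ schedCfg := t.GetUsScheduler(tpInstance)
+ shaping := &tp_pb.TrafficShapingInfo{Cir: 16000, Pir: 32000}
+ tSched := t.GetTrafficScheduler(tpInstance, schedCfg, shaping)
+*/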
+
+func (tpm *TechProfileMgr) GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) []*tp_pb.TrafficQueue {
+
+ if Dir == tp_pb.Direction_UPSTREAM {
+ // upstream GEM ports
+ NumGemPorts := len(tp.UpstreamGemPortAttributeList)
+ GemPorts := make([]*tp_pb.TrafficQueue, 0)
+ for Count := 0; Count < NumGemPorts; Count++ {
+ encryp := tp.UpstreamGemPortAttributeList[Count].AesEncryption == "True"
+ GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
+ Direction: tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.UsScheduler.Direction)),
+ GemportId: tp.UpstreamGemPortAttributeList[Count].GemportID,
+ PbitMap: tp.UpstreamGemPortAttributeList[Count].PbitMap,
+ AesEncryption: encryp,
+ SchedPolicy: tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue("sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)),
+ Priority: tp.UpstreamGemPortAttributeList[Count].PriorityQueue,
+ Weight: tp.UpstreamGemPortAttributeList[Count].Weight,
+ DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue("discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)),
+ })
+ }
+ log.Debugw("Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+ return GemPorts
+ } else if Dir == tp_pb.Direction_DOWNSTREAM {
+ //downstream GEM ports
+ NumGemPorts := len(tp.DownstreamGemPortAttributeList)
+ GemPorts := make([]*tp_pb.TrafficQueue, 0)
+ for Count := 0; Count < NumGemPorts; Count++ {
+ encryp := tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True"
+ GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
+ Direction: tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)),
+ GemportId: tp.DownstreamGemPortAttributeList[Count].GemportID,
+ PbitMap: tp.DownstreamGemPortAttributeList[Count].PbitMap,
+ AesEncryption: encryp,
+ SchedPolicy: tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)),
+ Priority: tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
+ Weight: tp.DownstreamGemPortAttributeList[Count].Weight,
+ DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
+ })
+ }
+ log.Debugw("Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+ return GemPorts
+ }
+ return nil
+}
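+
+/*
+Illustrative usage: one TrafficQueue is returned per GEM port in the
+requested direction, e.g.
+
+ queues := tpm.GetTrafficQueues(tpInst, tp_pb.Direction_UPSTREAM)
+ for _, q := range queues {
+ log.Debugw("traffic-queue", log.Fields{"gem": q.GemportId, "pbits": q.PbitMap})
+ }
+*/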
+
+func (tpm *TechProfileMgr) GetUsTrafficScheduler(tp *TechProfile) *tp_pb.TrafficScheduler {
+ UsScheduler := tpm.GetUsScheduler(tp)
+
+ return &tp_pb.TrafficScheduler{Direction: UsScheduler.Direction,
+ AllocId: tp.UsScheduler.AllocID,
+ Scheduler: UsScheduler}
+}
+
+// GetGemportIDForPbit returns the GEM port ID mapped to the given p-bit, or 0 if no match is found.
+func (t *TechProfileMgr) GetGemportIDForPbit(tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32 {
+ if Dir == tp_pb.Direction_UPSTREAM {
+ // upstream GEM ports
+ NumGemPorts := len(tp.UpstreamGemPortAttributeList)
+ for Count := 0; Count < NumGemPorts; Count++ {
+ NumPbitMaps := len(tp.UpstreamGemPortAttributeList[Count].PbitMap)
+ // start at index 2 to skip the "0b" prefix of the pbit map string
+ for ICount := 2; ICount < NumPbitMaps; ICount++ {
+ if p, err := strconv.Atoi(string(tp.UpstreamGemPortAttributeList[Count].PbitMap[ICount])); err == nil {
+ if uint32(ICount-2) == pbit && p == 1 { // Check this p-bit is set
+ log.Debugw("Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[Count].GemportID})
+ return tp.UpstreamGemPortAttributeList[Count].GemportID
+ }
+ }
+ }
+ }
+ } else if Dir == tp_pb.Direction_DOWNSTREAM {
+ //downstream GEM ports
+ NumGemPorts := len(tp.DownstreamGemPortAttributeList)
+ for Count := 0; Count < NumGemPorts; Count++ {
+ NumPbitMaps := len(tp.DownstreamGemPortAttributeList[Count].PbitMap)
+ // start at index 2 to skip the "0b" prefix of the pbit map string
+ for ICount := 2; ICount < NumPbitMaps; ICount++ {
+ if p, err := strconv.Atoi(string(tp.DownstreamGemPortAttributeList[Count].PbitMap[ICount])); err == nil {
+ if uint32(ICount-2) == pbit && p == 1 { // Check this p-bit is set
+ log.Debugw("Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[Count].GemportID})
+ return tp.DownstreamGemPortAttributeList[Count].GemportID
+ }
+ }
+ }
+ }
+ }
+ log.Errorw("No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
+ return 0
+}
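+
+/*
+Worked example, per the loops above: PbitMap carries a "0b" prefix followed
+by one character per p-bit, and the character at string index i (i >= 2)
+maps to p-bit i-2. For an upstream GEM port with the hypothetical PbitMap
+"0b00000101", the call below returns that GEM port's ID for p-bits 5 and 7,
+and 0 for any other p-bit, e.g.
+
+ gemID := t.GetGemportIDForPbit(tpInst, tp_pb.Direction_UPSTREAM, 5)
+*/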
diff --git a/pkg/common/techprofile/tech_profile_if.go b/pkg/common/techprofile/tech_profile_if.go
new file mode 100644
index 0000000..de2fca4
--- /dev/null
+++ b/pkg/common/techprofile/tech_profile_if.go
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package techprofile
+
+import (
+ "github.com/opencord/voltha-lib-go/pkg/db/model"
+ tp_pb "github.com/opencord/voltha-protos/go/tech_profile"
+)
+
+type TechProfileIf interface {
+ SetKVClient() *model.Backend
+ GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string
+ GetTPInstanceFromKVStore(techProfiletblID uint32, path string) (*TechProfile, error)
+ CreateTechProfInstance(techProfiletblID uint32, uniPortName string, intfId uint32) *TechProfile
+ DeleteTechProfileInstance(techProfiletblID uint32, uniPortName string) error
+ GetprotoBufParamValue(paramType string, paramKey string) int32
+ GetUsScheduler(tpInstance *TechProfile) *tp_pb.SchedulerConfig
+ GetDsScheduler(tpInstance *TechProfile) *tp_pb.SchedulerConfig
+ GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
+ ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
+ GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) []*tp_pb.TrafficQueue
+ GetGemportIDForPbit(tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32
+}
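+
+/*
+TechProfileMgr in tech_profile.go provides all of these methods; a
+compile-time conformance check (a common Go idiom, not part of this
+package) would read
+
+ var _ TechProfileIf = &TechProfileMgr{}
+*/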
diff --git a/pkg/common/version/version.go b/pkg/common/version/version.go
new file mode 100644
index 0000000..49c0b10
--- /dev/null
+++ b/pkg/common/version/version.go
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package version
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Default build-time variables.
+// These values can (and should) be overridden via ldflags when building
+// with `make`.
+var (
+ version = "unknown-version"
+ goVersion = "unknown-goversion"
+ vcsRef = "unknown-vcsref"
+ vcsDirty = "unknown-vcsdirty"
+ buildTime = "unknown-buildtime"
+ os = "unknown-os"
+ arch = "unknown-arch"
+)
+
+type VersionInfoType struct {
+ Version string `json:"version"`
+ GoVersion string `json:"goversion"`
+ VcsRef string `json:"vcsref"`
+ VcsDirty string `json:"vcsdirty"`
+ BuildTime string `json:"buildtime"`
+ Os string `json:"os"`
+ Arch string `json:"arch"`
+}
+
+var VersionInfo VersionInfoType
+
+func init() {
+ VersionInfo = VersionInfoType{
+ Version: version,
+ VcsRef: vcsRef,
+ VcsDirty: vcsDirty,
+ GoVersion: goVersion,
+ Os: os,
+ Arch: arch,
+ BuildTime: buildTime,
+ }
+}
+
+func (v VersionInfoType) String(indent string) string {
+ builder := strings.Builder{}
+
+ builder.WriteString(fmt.Sprintf("%sVersion: %s\n", indent, VersionInfo.Version))
+ builder.WriteString(fmt.Sprintf("%sGoVersion: %s\n", indent, VersionInfo.GoVersion))
+ builder.WriteString(fmt.Sprintf("%sVCS Ref: %s\n", indent, VersionInfo.VcsRef))
+ builder.WriteString(fmt.Sprintf("%sVCS Dirty: %s\n", indent, VersionInfo.VcsDirty))
+ builder.WriteString(fmt.Sprintf("%sBuilt: %s\n", indent, VersionInfo.BuildTime))
+ builder.WriteString(fmt.Sprintf("%sOS/Arch: %s/%s\n", indent, VersionInfo.Os, VersionInfo.Arch))
+ return builder.String()
+}
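+
+/*
+Illustrative build invocation (the version and ref values are examples):
+each variable above is injected at link time with -X, e.g.
+
+ go build -ldflags "\
+ -X github.com/opencord/voltha-lib-go/pkg/common/version.version=2.2.1 \
+ -X github.com/opencord/voltha-lib-go/pkg/common/version.vcsRef=$(git rev-parse --short HEAD)"
+*/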