diff --git a/go.mod b/go.mod index e1614469..6f59bb65 100644 --- a/go.mod +++ b/go.mod @@ -43,6 +43,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/kr/pretty v0.3.0 // indirect github.com/leodido/go-urn v1.2.4 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/mitchellh/mapstructure v1.4.2 // indirect @@ -50,6 +51,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rogpeppe/go-internal v1.8.0 // indirect github.com/russross/blackfriday/v2 v2.0.1 // indirect github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect @@ -64,5 +66,6 @@ require ( golang.org/x/text v0.14.0 // indirect google.golang.org/appengine v1.6.6 // indirect google.golang.org/protobuf v1.30.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 2b867b7a..348f08a4 100644 --- a/go.sum +++ b/go.sum @@ -34,6 +34,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/aead/cmac v0.0.0-20160719120800-7af84192f0b1/go.mod h1:nuudZmJhzWtx2212z+pkuy7B6nkBqa+xwNXZHL1j8cg= +github.com/aead/cmac v0.0.0-20160719120800-7af84192f0b1/go.mod h1:nuudZmJhzWtx2212z+pkuy7B6nkBqa+xwNXZHL1j8cg= github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antonfisher/nested-logrus-formatter v1.3.1 h1:NFJIr+pzwv5QLHTPyKz9UMEoHck02Q9L0FP13b/xSbQ= @@ -54,6 +55,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -173,11 +176,14 @@ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= @@ -197,12 +203,16 @@ github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= @@ -262,6 +272,11 @@ golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= @@ -400,6 +415,7 @@ golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -538,8 +554,9 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY= gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0= diff --git a/internal/context/charging.go b/internal/context/charging.go new file mode 100644 index 00000000..b8c73e16 --- /dev/null +++ b/internal/context/charging.go @@ -0,0 +1,33 @@ +package context + +import ( + "github.com/free5gc/openapi/models" +) + +type ChargingLevel uint8 + +// For a rating group that is pdu session charging level, all volume in a pdu session will be charged +// For a rating group that is flow charging level (or Rating group level (32.255)), +// only volume in a flow will be charged +const ( + PduSessionCharging ChargingLevel = iota + FlowCharging +) + +type RequestType uint8 + +// For each charging event, it will have a corresponding charging request type, see 32.255 Table 5.2.1.4.1 +const ( + CHARGING_INIT RequestType = iota + CHARGING_UPDATE + CHARGING_RELEASE +) + +type ChargingInfo struct { + ChargingMethod models.QuotaManagementIndicator + VolumeLimitExpiryTimer *Timer + EventLimitExpiryTimer *Timer + ChargingLevel ChargingLevel + RatingGroup int32 + UpfId string +} diff --git a/internal/context/context.go b/internal/context/context.go index ce342f1b..b9d4ced5 100644 --- a/internal/context/context.go +++ b/internal/context/context.go @@ -3,6 +3,7 @@ package context import ( "context" "fmt" + "math" "net" "os" "sync/atomic" @@ -18,6 +19,7 @@ import ( "github.com/free5gc/pfcp/pfcpType" "github.com/free5gc/smf/internal/logger" "github.com/free5gc/smf/pkg/factory" + "github.com/free5gc/util/idgenerator" ) func Init() { @@ -79,6 +81,16 @@ type SMFContext struct { UEPreConfigPathPool map[string]*UEPreConfigPaths UEDefaultPathPool map[string]*UEDefaultPaths LocalSEIDCount uint64 + + // Each pdu session should have a unique charging id + ChargingIDGenerator *idgenerator.IDGenerator +} + +func GenerateChargingID() int32 { + if id, err := smfContext.ChargingIDGenerator.Allocate(); err == nil { + return int32(id) + } + return 0 } 
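As an illustrative aside (not part of the patch): the hunks above give the SMF a process-wide ChargingIDGenerator and a GenerateChargingID() helper so each PDU session gets a unique charging ID in [1, math.MaxUint32]. The sketch below shows the same allocation pattern as a stand-alone program; NewGenerator and Allocate are taken directly from the patch, while FreeID and the main() wrapper are assumptions added for demonstration only.

package main

import (
	"fmt"
	"math"

	"github.com/free5gc/util/idgenerator"
)

func main() {
	// InitSmfContext() builds the generator over [1, math.MaxUint32].
	gen := idgenerator.NewGenerator(1, math.MaxUint32)

	// GenerateChargingID() allocates one ID per new SM context and
	// falls back to 0 if allocation fails.
	id, err := gen.Allocate()
	if err != nil {
		fmt.Println("charging ID allocation failed, falling back to 0")
		id = 0
	}
	fmt.Printf("ChargingID for the new PDU session: %d\n", id)

	// Assumed: the pool can take the ID back once the session is released.
	gen.FreeID(id)
}
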
func ResolveIP(host string) net.IP { @@ -241,6 +253,8 @@ func InitSmfContext(config *factory.Config) { smfContext.UserPlaneInformation = NewUserPlaneInformation(&configuration.UserPlaneInformation) + smfContext.ChargingIDGenerator = idgenerator.NewGenerator(1, math.MaxUint32) + SetupNFProfile(config) smfContext.Locality = configuration.Locality diff --git a/internal/context/datapath.go b/internal/context/datapath.go index de75c44c..d8a31d59 100644 --- a/internal/context/datapath.go +++ b/internal/context/datapath.go @@ -3,6 +3,7 @@ package context import ( "fmt" "strconv" + "strings" "github.com/google/uuid" @@ -306,6 +307,14 @@ func (dataPathPool DataPathPool) GetDefaultPath() *DataPath { return nil } +func (dataPathPool DataPathPool) ResetDefaultPath() error { + for _, path := range dataPathPool { + path.IsDefaultPath = false + } + + return nil +} + func (dataPath *DataPath) String() string { firstDPNode := dataPath.FirstDPNode @@ -345,6 +354,10 @@ func getUrrIdKey(uuid string, urrId uint32) string { return uuid + ":" + strconv.Itoa(int(urrId)) } +func GetUpfIdFromUrrIdKey(urrIdKey string) string { + return strings.Split(urrIdKey, ":")[0] +} + func (node DataPathNode) addUrrToNode(smContext *SMContext, urrId uint32, isMeasurePkt, isMeasureBeforeQos bool) { var urr *URR var ok bool @@ -360,19 +373,19 @@ func (node DataPathNode) addUrrToNode(smContext *SMContext, urrId uint32, isMeas logger.PduSessLog.Errorln("new URR failed") return } - smContext.UrrUpfMap[id] = urr } if urr != nil { if node.UpLinkTunnel != nil && node.UpLinkTunnel.PDR != nil { - node.UpLinkTunnel.PDR.URR = append(node.UpLinkTunnel.PDR.URR, urr) + node.UpLinkTunnel.PDR.AppendURRs([]*URR{urr}) } if node.DownLinkTunnel != nil && node.DownLinkTunnel.PDR != nil { - node.DownLinkTunnel.PDR.URR = append(node.DownLinkTunnel.PDR.URR, urr) + node.DownLinkTunnel.PDR.AppendURRs([]*URR{urr}) } } } +// Add reserve urr to datapath UPF func (datapath *DataPath) addUrrToPath(smContext *SMContext) { if smContext.UrrReportTime == 0 && smContext.UrrReportThreshold == 0 { logger.PduSessLog.Errorln("URR Report time and threshold is 0") @@ -385,16 +398,17 @@ func (datapath *DataPath) addUrrToPath(smContext *SMContext) { if curDataPathNode.IsANUPF() { if curDataPathNode.Next() == nil { - MBQEUrrId = smContext.UrrIdMap[N3N6_MBEQ_URR] - MAQEUrrId = smContext.UrrIdMap[N3N6_MAEQ_URR] + MBQEUrrId = smContext.UrrIdMap[N3N6_MBQE_URR] + MAQEUrrId = smContext.UrrIdMap[N3N6_MAQE_URR] } else { - MBQEUrrId = smContext.UrrIdMap[N3N9_MBEQ_URR] - MAQEUrrId = smContext.UrrIdMap[N3N9_MAEQ_URR] + MBQEUrrId = smContext.UrrIdMap[N3N9_MBQE_URR] + MAQEUrrId = smContext.UrrIdMap[N3N9_MAQE_URR] } } else { - MBQEUrrId = smContext.UrrIdMap[N9N6_MBEQ_URR] - MAQEUrrId = smContext.UrrIdMap[N9N6_MAEQ_URR] + MBQEUrrId = smContext.UrrIdMap[N9N6_MBQE_URR] + MAQEUrrId = smContext.UrrIdMap[N9N6_MAQE_URR] } + curDataPathNode.addUrrToNode(smContext, MBQEUrrId, true, true) curDataPathNode.addUrrToNode(smContext, MAQEUrrId, true, false) } @@ -422,7 +436,7 @@ func (dataPath *DataPath) ActivateTunnelAndPDR(smContext *SMContext, precedence // Note: This should be after Activate Tunnels if smContext.UrrReportTime != 0 || smContext.UrrReportThreshold != 0 { dataPath.addUrrToPath(smContext) - logger.PduSessLog.Warn("Create URR") + logger.PduSessLog.Trace("Create URR") } else { logger.PduSessLog.Warn("No Create URR") } @@ -530,6 +544,8 @@ func (dataPath *DataPath) ActivateTunnelAndPDR(smContext *SMContext, precedence } ULFAR := ULPDR.FAR + // If the flow is disable, the tunnel and the 
session rules will not be created + ULFAR.ApplyAction = pfcpType.ApplyAction{ Buff: false, Drop: false, @@ -537,6 +553,7 @@ func (dataPath *DataPath) ActivateTunnelAndPDR(smContext *SMContext, precedence Forw: true, Nocp: false, } + ULFAR.ForwardingParameters = &ForwardingParameters{ DestinationInterface: pfcpType.DestinationInterface{ InterfaceValue: pfcpType.DestinationInterfaceCore, @@ -633,6 +650,7 @@ func (dataPath *DataPath) ActivateTunnelAndPDR(smContext *SMContext, precedence if nextDLDest := curDataPathNode.Prev(); nextDLDest != nil { logger.PduSessLog.Traceln("In DLPDR OuterHeaderCreation") nextDLTunnel := nextDLDest.DownLinkTunnel + // If the flow is disable, the tunnel and the session rules will not be created DLFAR.ApplyAction = pfcpType.ApplyAction{ Buff: false, @@ -712,6 +730,108 @@ func (p *DataPath) RemovePDR() { } } +func (p *DataPath) GetChargingUrr(smContext *SMContext) []*URR { + var chargingUrrs []*URR + var urrs []*URR + + for node := p.FirstDPNode; node != nil; node = node.Next() { + // Charging rules only apply to anchor UPF + // Note: ULPDR and DLPDR share the same URR but have different FAR + // See AddChargingRules() for more details + if node.IsAnchorUPF() { + if node.UpLinkTunnel != nil && node.UpLinkTunnel.PDR != nil { + urrs = node.UpLinkTunnel.PDR.URR + } else if node.DownLinkTunnel != nil && node.DownLinkTunnel.PDR != nil { + urrs = node.DownLinkTunnel.PDR.URR + } + + for _, urr := range urrs { + if smContext.ChargingInfo[urr.URRID] != nil { + chargingUrrs = append(chargingUrrs, urr) + } + } + } + } + + return chargingUrrs +} + +func (p *DataPath) AddChargingRules(smContext *SMContext, chgLevel ChargingLevel, chgData *models.ChargingData) { + if chgData == nil { + return + } + + for node := p.FirstDPNode; node != nil; node = node.Next() { + // Charging rules only apply to anchor UPF + if node.IsAnchorUPF() { + var urr *URR + chgInfo := &ChargingInfo{ + RatingGroup: chgData.RatingGroup, + ChargingLevel: chgLevel, + UpfId: node.UPF.UUID(), + } + + // urrId, err := node.UPF.urrIDGenerator.Allocate() + urrId, err := smContext.UrrIDGenerator.Allocate() + if err != nil { + logger.PduSessLog.Errorln("Generate URR Id failed") + return + } + + currentUUID := node.UPF.UUID() + id := getUrrIdKey(currentUUID, uint32(urrId)) + + if oldURR, ok := smContext.UrrUpfMap[id]; !ok { + // For online charging, the charging trigger "Start of the Service data flow" are needed. 
+ // Therefore, the START reporting trigger in the urr are needed to detect the Start of the SDF + if chgData.Online { + if newURR, err := node.UPF.AddURR(uint32(urrId), + NewMeasureInformation(false, false), + SetStartOfSDFTrigger()); err != nil { + logger.PduSessLog.Errorln("new URR failed") + return + } else { + urr = newURR + } + + chgInfo.ChargingMethod = models.QuotaManagementIndicator_ONLINE_CHARGING + } else if chgData.Offline { + // For offline charging, URR only need to report based on the volume threshold + if newURR, err := node.UPF.AddURR(uint32(urrId), + NewMeasureInformation(false, false), + NewVolumeThreshold(smContext.UrrReportThreshold)); err != nil { + logger.PduSessLog.Errorln("new URR failed") + return + } else { + urr = newURR + } + + chgInfo.ChargingMethod = models.QuotaManagementIndicator_OFFLINE_CHARGING + } + smContext.UrrUpfMap[id] = urr + } else { + urr = oldURR + } + + if urr != nil { + logger.PduSessLog.Tracef("Successfully add URR %d for Rating group %d", urr.URRID, chgData.RatingGroup) + + smContext.ChargingInfo[urr.URRID] = chgInfo + if node.UpLinkTunnel != nil && node.UpLinkTunnel.PDR != nil { + if !isUrrExist(node.UpLinkTunnel.PDR.URR, urr) { + node.UpLinkTunnel.PDR.AppendURRs([]*URR{urr}) + } + } + if node.DownLinkTunnel != nil && node.DownLinkTunnel.PDR != nil { + if !isUrrExist(node.DownLinkTunnel.PDR.URR, urr) { + node.DownLinkTunnel.PDR.AppendURRs([]*URR{urr}) + } + } + } + } + } +} + func (p *DataPath) AddQoS(smContext *SMContext, qfi uint8, qos *models.QosData) { // QFI = 1 -> default QFI if qos == nil && qfi != 1 { diff --git a/internal/context/pcc_rule.go b/internal/context/pcc_rule.go index 12f33171..1afd5a31 100644 --- a/internal/context/pcc_rule.go +++ b/internal/context/pcc_rule.go @@ -41,6 +41,14 @@ func (r *PCCRule) FlowDescription() string { return "" } +func (r *PCCRule) RefChgDataID() string { + if len(r.RefChgData) > 0 { + // now 1 pcc rule only maps to 1 Charging data + return r.RefChgData[0] + } + return "" +} + func (r *PCCRule) RefQosDataID() string { if len(r.RefQosData) > 0 { // now 1 pcc rule only maps to 1 QoS data @@ -61,6 +69,22 @@ func (r *PCCRule) RefTcDataID() string { return "" } +func (r *PCCRule) IdentifyChargingLevel() (ChargingLevel, error) { + dlIPFilterRule, err := flowdesc.Decode(r.FlowDescription()) + if err != nil { + return 0, err + } + // For the PCC rule that are applicable for all datapath, + // it's charging level will be PDU-based + if dlIPFilterRule.Src == "any" && dlIPFilterRule.Dst == "assigned" { + return PduSessionCharging, nil + } else { + // For the PCC rule that is applicable for all datapath for a datapath, + // it's charging level will be flow-based + return FlowCharging, nil + } +} + func (r *PCCRule) UpdateDataPathFlowDescription(dlFlowDesc string) error { if r.Datapath == nil { return fmt.Errorf("pcc[%s]: no data path", r.PccRuleId) @@ -69,26 +93,10 @@ func (r *PCCRule) UpdateDataPathFlowDescription(dlFlowDesc string) error { if dlFlowDesc == "" { return fmt.Errorf("pcc[%s]: no flow description", r.PccRuleId) } - ulFlowDesc := getUplinkFlowDescription(dlFlowDesc) - if ulFlowDesc == "" { - return fmt.Errorf("pcc[%s]: uplink flow description parsing error", r.PccRuleId) - } - r.Datapath.UpdateFlowDescription(ulFlowDesc, dlFlowDesc) - return nil -} - -func getUplinkFlowDescription(dlFlowDesc string) string { - ulIPFilterRule, err := flowdesc.Decode(dlFlowDesc) - if err != nil { - return "" - } - ulIPFilterRule.SwapSrcAndDst() - ulFlowDesc, err := flowdesc.Encode(ulIPFilterRule) - if err != nil { - 
return "" - } - return ulFlowDesc + ulFlowDesc := dlFlowDesc + r.Datapath.UpdateFlowDescription(ulFlowDesc, dlFlowDesc) // UL, DL flow description should be same + return nil } func (r *PCCRule) AddDataPathForwardingParameters(c *SMContext, @@ -113,8 +121,12 @@ func (r *PCCRule) AddDataPathForwardingParameters(c *SMContext, return } } - r.Datapath.AddForwardingParameters(routeProf.ForwardingPolicyID, - c.Tunnel.DataPathPool.GetDefaultPath().FirstDPNode.GetUpLinkPDR().PDI.LocalFTeid.Teid) + if c.Tunnel.DataPathPool.GetDefaultPath() == nil { + logger.CtxLog.Infoln("No Default Data Path") + } else { + r.Datapath.AddForwardingParameters(routeProf.ForwardingPolicyID, + c.Tunnel.DataPathPool.GetDefaultPath().FirstDPNode.GetUpLinkPDR().PDI.LocalFTeid.Teid) + } } func (r *PCCRule) BuildNasQoSRule(smCtx *SMContext, @@ -195,7 +207,7 @@ func createNasPacketFilter( } } - if ipFilterRule.Dst != "any" { + if ipFilterRule.Dst != "assigned" { _, ipNet, err := net.ParseCIDR(ipFilterRule.Dst) if err != nil { return nil, fmt.Errorf("parse IP fail: %s", err) diff --git a/internal/context/pfcp_reports.go b/internal/context/pfcp_reports.go new file mode 100644 index 00000000..14bc737d --- /dev/null +++ b/internal/context/pfcp_reports.go @@ -0,0 +1,95 @@ +package context + +import ( + "github.com/free5gc/openapi/models" + "github.com/free5gc/pfcp" + "github.com/free5gc/pfcp/pfcpType" + "github.com/free5gc/smf/internal/logger" +) + +func (smContext *SMContext) HandleReports( + UsageReportRequest []*pfcp.UsageReportPFCPSessionReportRequest, + UsageReportModification []*pfcp.UsageReportPFCPSessionModificationResponse, + UsageReportDeletion []*pfcp.UsageReportPFCPSessionDeletionResponse, + nodeId pfcpType.NodeID, reportTpye models.TriggerType, +) { + var usageReport UsageReport + upf := RetrieveUPFNodeByNodeID(nodeId) + upfId := upf.UUID() + + for _, report := range UsageReportRequest { + usageReport.UrrId = report.URRID.UrrIdValue + usageReport.UpfId = upfId + usageReport.TotalVolume = report.VolumeMeasurement.TotalVolume + usageReport.UplinkVolume = report.VolumeMeasurement.UplinkVolume + usageReport.DownlinkVolume = report.VolumeMeasurement.DownlinkVolume + usageReport.TotalPktNum = report.VolumeMeasurement.TotalPktNum + usageReport.UplinkPktNum = report.VolumeMeasurement.UplinkPktNum + usageReport.DownlinkPktNum = report.VolumeMeasurement.DownlinkPktNum + usageReport.ReportTpye = identityTriggerType(report.UsageReportTrigger) + + if reportTpye != "" { + usageReport.ReportTpye = reportTpye + } + + smContext.UrrReports = append(smContext.UrrReports, usageReport) + } + for _, report := range UsageReportModification { + usageReport.UrrId = report.URRID.UrrIdValue + usageReport.UpfId = upfId + usageReport.TotalVolume = report.VolumeMeasurement.TotalVolume + usageReport.UplinkVolume = report.VolumeMeasurement.UplinkVolume + usageReport.DownlinkVolume = report.VolumeMeasurement.DownlinkVolume + usageReport.TotalPktNum = report.VolumeMeasurement.TotalPktNum + usageReport.UplinkPktNum = report.VolumeMeasurement.UplinkPktNum + usageReport.DownlinkPktNum = report.VolumeMeasurement.DownlinkPktNum + usageReport.ReportTpye = identityTriggerType(report.UsageReportTrigger) + + if reportTpye != "" { + usageReport.ReportTpye = reportTpye + } + + smContext.UrrReports = append(smContext.UrrReports, usageReport) + } + for _, report := range UsageReportDeletion { + usageReport.UrrId = report.URRID.UrrIdValue + usageReport.UpfId = upfId + usageReport.TotalVolume = report.VolumeMeasurement.TotalVolume + usageReport.UplinkVolume = 
report.VolumeMeasurement.UplinkVolume + usageReport.DownlinkVolume = report.VolumeMeasurement.DownlinkVolume + usageReport.TotalPktNum = report.VolumeMeasurement.TotalPktNum + usageReport.UplinkPktNum = report.VolumeMeasurement.UplinkPktNum + usageReport.DownlinkPktNum = report.VolumeMeasurement.DownlinkPktNum + usageReport.ReportTpye = identityTriggerType(report.UsageReportTrigger) + + if reportTpye != "" { + usageReport.ReportTpye = reportTpye + } + + smContext.UrrReports = append(smContext.UrrReports, usageReport) + } +} + +func identityTriggerType(usarTrigger *pfcpType.UsageReportTrigger) models.TriggerType { + var trigger models.TriggerType + + if usarTrigger.Volth { + trigger = models.TriggerType_QUOTA_THRESHOLD + } else if usarTrigger.Volqu { + trigger = models.TriggerType_QUOTA_EXHAUSTED + } else if usarTrigger.Quvti { + trigger = models.TriggerType_VALIDITY_TIME + } else if usarTrigger.Start { + trigger = models.TriggerType_START_OF_SERVICE_DATA_FLOW + } else if usarTrigger.Immer { + logger.PduSessLog.Trace("Reports Query by SMF, trigger should be filled later") + return "" + } else if usarTrigger.Termr { + trigger = models.TriggerType_FINAL + } else { + logger.PduSessLog.Trace("Report is not a charging trigger") + return "" + } + + return trigger +} diff --git a/internal/context/pfcp_rules.go b/internal/context/pfcp_rules.go index ca4ef3c0..6344ad27 100644 --- a/internal/context/pfcp_rules.go +++ b/internal/context/pfcp_rules.go @@ -11,6 +11,7 @@ const ( RULE_CREATE RuleState = 1 RULE_UPDATE RuleState = 2 RULE_REMOVE RuleState = 3 + RULE_QUERY RuleState = 4 ) type RuleState uint8 @@ -44,8 +45,10 @@ type URR struct { MeasureMethod string // vol or time ReportingTrigger pfcpType.ReportingTriggers MeasurementPeriod time.Duration + QuotaValidityTime time.Time MeasurementInformation pfcpType.MeasurementInformation VolumeThreshold uint64 + VolumeQuota uint64 State RuleState } @@ -60,16 +63,31 @@ func NewMeasureInformation(isMeasurePkt, isMeasureBeforeQos bool) UrrOpt { func NewMeasurementPeriod(time time.Duration) UrrOpt { return func(urr *URR) { + urr.ReportingTrigger.Perio = true urr.MeasurementPeriod = time } } func NewVolumeThreshold(threshold uint64) UrrOpt { return func(urr *URR) { + urr.ReportingTrigger.Volth = true urr.VolumeThreshold = threshold } } +func NewVolumeQuota(quota uint64) UrrOpt { + return func(urr *URR) { + urr.ReportingTrigger.Volqu = true + urr.VolumeQuota = quota + } +} + +func SetStartOfSDFTrigger() UrrOpt { + return func(urr *URR) { + urr.ReportingTrigger.Start = true + } +} + func MeasureInformation(isMeasurePkt, isMeasureBeforeQos bool) pfcpType.MeasurementInformation { var measureInformation pfcpType.MeasurementInformation measureInformation.Mnop = isMeasurePkt @@ -77,6 +95,23 @@ func MeasureInformation(isMeasurePkt, isMeasureBeforeQos bool) pfcpType.Measurem return measureInformation } +func (pdr *PDR) AppendURRs(urrs []*URR) { + for _, urr := range urrs { + if !isUrrExist(pdr.URR, urr) { + pdr.URR = append(pdr.URR, urr) + } + } +} + +func isUrrExist(URRs []*URR, urr *URR) bool { // check if urr is in URRs list + for _, URR := range URRs { + if urr.URRID == URR.URRID { + return true + } + } + return false +} + // Packet Detection. 
7.5.2.2-2 type PDI struct { SourceInterface pfcpType.SourceInterface diff --git a/internal/context/sm_context.go b/internal/context/sm_context.go index 0215a504..30bbb061 100644 --- a/internal/context/sm_context.go +++ b/internal/context/sm_context.go @@ -19,6 +19,7 @@ import ( "github.com/free5gc/ngap/ngapType" "github.com/free5gc/openapi" "github.com/free5gc/openapi/Namf_Communication" + "github.com/free5gc/openapi/Nchf_ConvergedCharging" "github.com/free5gc/openapi/Nnrf_NFDiscovery" "github.com/free5gc/openapi/Npcf_SMPolicyControl" "github.com/free5gc/openapi/models" @@ -44,18 +45,19 @@ const ( type UrrType int +// Reserved URR report for ID = 0 ~ 6 const ( - N3N6_MBEQ_URR UrrType = iota - N3N6_MAEQ_URR - N3N9_MBEQ_URR - N3N9_MAEQ_URR - N9N6_MBEQ_URR - N9N6_MAEQ_URR + N3N6_MBQE_URR UrrType = iota + N3N6_MAQE_URR + N3N9_MBQE_URR + N3N9_MAQE_URR + N9N6_MBQE_URR + N9N6_MAQE_URR NOT_FOUND_URR ) func (t UrrType) String() string { - urrTypeList := []string{"N3N6_MBEQ", "N3N6_MAEQ", "N3N9_MBEQ", "N3N9_MAEQ", "N9N6_MBEQ", "N9N6_MAEQ"} + urrTypeList := []string{"N3N6_MBQE", "N3N6_MAQE", "N3N9_MBQE", "N3N9_MAQE", "N9N6_MBQE", "N9N6_MAQE"} return urrTypeList[t] } @@ -99,6 +101,7 @@ type EventExposureNotification struct { type UsageReport struct { UrrId uint32 + UpfId string TotalVolume uint64 UplinkVolume uint64 @@ -153,9 +156,11 @@ type SMContext struct { // Client SMPolicyClient *Npcf_SMPolicyControl.APIClient CommunicationClient *Namf_Communication.APIClient + ChargingClient *Nchf_ConvergedCharging.APIClient AMFProfile models.NfProfile SelectedPCFProfile models.NfProfile + SelectedCHFProfile models.NfProfile SmStatusNotifyUri string Tunnel *UPTunnel @@ -171,6 +176,7 @@ type SMContext struct { PCCRules map[string]*PCCRule SessionRules map[string]*SessionRule TrafficControlDatas map[string]*TrafficControlData + ChargingData map[string]*models.ChargingData QosDatas map[string]*models.QosData UpPathChgEarlyNotification map[string]*EventExposureNotification // Key: Uri+NotifId @@ -196,8 +202,21 @@ type SMContext struct { UrrUpfMap map[string]*URR UrrReportTime time.Duration UrrReportThreshold uint64 - UrrReports []UsageReport - + // Cache the usage reports, sent from UPF + // Those information will be included in CDR. + UrrReports []UsageReport + + // Charging Related + ChargingDataRef string + // Each PDU session has a unique charging id + ChargingID int32 + RequestedUnit int32 + // key = urrid + // All urr can map to a rating group + // However, a rating group may map to more than one urr + // e.g. In UL CL case, the rating group for recoreding PDU Session volume may map to two URR + // one is for PSA 1, the other is for PSA 2. 
+ ChargingInfo map[uint32]*ChargingInfo // NAS Pti uint8 EstAcceptCause5gSMValue uint8 @@ -282,11 +301,19 @@ func NewSMContext(id string, pduSessID int32) *SMContext { smContext.GenerateUrrId() smContext.UrrUpfMap = make(map[string]*URR) + smContext.ChargingInfo = make(map[uint32]*ChargingInfo) + smContext.ChargingID = GenerateChargingID() + if factory.SmfConfig.Configuration != nil { smContext.UrrReportTime = time.Duration(factory.SmfConfig.Configuration.UrrPeriod) * time.Second smContext.UrrReportThreshold = factory.SmfConfig.Configuration.UrrThreshold logger.CtxLog.Infof("UrrPeriod: %v", smContext.UrrReportTime) logger.CtxLog.Infof("UrrThreshold: %d", smContext.UrrReportThreshold) + if factory.SmfConfig.Configuration.RequestedUnit != 0 { + smContext.RequestedUnit = factory.SmfConfig.Configuration.RequestedUnit + } else { + smContext.RequestedUnit = 1000 + } } return smContext @@ -350,22 +377,22 @@ func GetSMContextBySEID(SEID uint64) *SMContext { func (smContext *SMContext) GenerateUrrId() { if id, err := smContext.UrrIDGenerator.Allocate(); err == nil { - smContext.UrrIdMap[N3N6_MBEQ_URR] = uint32(id) + smContext.UrrIdMap[N3N6_MBQE_URR] = uint32(id) } if id, err := smContext.UrrIDGenerator.Allocate(); err == nil { - smContext.UrrIdMap[N3N6_MAEQ_URR] = uint32(id) + smContext.UrrIdMap[N3N6_MAQE_URR] = uint32(id) } if id, err := smContext.UrrIDGenerator.Allocate(); err == nil { - smContext.UrrIdMap[N9N6_MBEQ_URR] = uint32(id) + smContext.UrrIdMap[N9N6_MBQE_URR] = uint32(id) } if id, err := smContext.UrrIDGenerator.Allocate(); err == nil { - smContext.UrrIdMap[N9N6_MAEQ_URR] = uint32(id) + smContext.UrrIdMap[N9N6_MAQE_URR] = uint32(id) } if id, err := smContext.UrrIDGenerator.Allocate(); err == nil { - smContext.UrrIdMap[N3N9_MBEQ_URR] = uint32(id) + smContext.UrrIdMap[N3N9_MBQE_URR] = uint32(id) } if id, err := smContext.UrrIDGenerator.Allocate(); err == nil { - smContext.UrrIdMap[N3N9_MAEQ_URR] = uint32(id) + smContext.UrrIdMap[N3N9_MAQE_URR] = uint32(id) } } @@ -409,6 +436,57 @@ func (smContext *SMContext) PDUAddressToNAS() ([12]byte, uint8) { return addr, addrLen } +// CHFSelection will select CHF for this SM Context +func (smContext *SMContext) CHFSelection() error { + // Send NFDiscovery for find CHF + localVarOptionals := Nnrf_NFDiscovery.SearchNFInstancesParamOpts{ + // Supi: optional.NewString(smContext.Supi), + } + + ctx, _, err := smfContext.GetTokenCtx(models.ServiceName_NNRF_DISC, models.NfType_NRF) + if err != nil { + return err + } + + rsp, res, err := GetSelf(). + NFDiscoveryClient. + NFInstancesStoreApi. 
+ SearchNFInstances(ctx, models.NfType_CHF, models.NfType_SMF, &localVarOptionals) + if err != nil { + return err + } + defer func() { + if rspCloseErr := res.Body.Close(); rspCloseErr != nil { + logger.PduSessLog.Errorf("SmfEventExposureNotification response body cannot close: %+v", rspCloseErr) + } + }() + + if res != nil { + if status := res.StatusCode; status != http.StatusOK { + apiError := err.(openapi.GenericOpenAPIError) + problemDetails := apiError.Model().(models.ProblemDetails) + + logger.CtxLog.Warningf("NFDiscovery SMF return status: %d\n", status) + logger.CtxLog.Warningf("Detail: %v\n", problemDetails.Title) + } + } + + // Select CHF from available CHF + + smContext.SelectedCHFProfile = rsp.NfInstances[0] + + // Create Converged Charging Client for this SM Context + for _, service := range *smContext.SelectedCHFProfile.NfServices { + if service.ServiceName == models.ServiceName_NCHF_CONVERGEDCHARGING { + ConvergedChargingConf := Nchf_ConvergedCharging.NewConfiguration() + ConvergedChargingConf.SetBasePath(service.ApiPrefix) + smContext.ChargingClient = Nchf_ConvergedCharging.NewAPIClient(ConvergedChargingConf) + } + } + + return nil +} + // PCFSelection will select PCF for this SM Context func (smContext *SMContext) PCFSelection() error { ctx, _, err := GetSelf().GetTokenCtx(models.ServiceName_NNRF_DISC, "NRF") @@ -422,7 +500,7 @@ func (smContext *SMContext) PCFSelection() error { localVarOptionals.PreferredLocality = optional.NewString(GetSelf().Locality) } - rep, res, err := GetSelf(). + rsp, res, err := GetSelf(). NFDiscoveryClient. NFInstancesStoreApi. SearchNFInstances(ctx, models.NfType_PCF, models.NfType_SMF, &localVarOptionals) @@ -447,7 +525,7 @@ func (smContext *SMContext) PCFSelection() error { // Select PCF from available PCF - smContext.SelectedPCFProfile = rep.NfInstances[0] + smContext.SelectedPCFProfile = rsp.NfInstances[0] // Create SMPolicyControl Client for this SM Context for _, service := range *smContext.SelectedPCFProfile.NfServices { @@ -565,6 +643,7 @@ func (c *SMContext) AllocUeIP() error { return nil } +// This function create a data path to be default data path. func (c *SMContext) SelectDefaultDataPath() error { if c.SelectionParam == nil || c.SelectedUPF == nil { return fmt.Errorf("SelectDefaultDataPath err: SelectionParam or SelectedUPF is nil") @@ -577,10 +656,10 @@ func (c *SMContext) SelectDefaultDataPath() error { c.Tunnel.DataPathPool = uePreConfigPaths.DataPathPool c.Tunnel.PathIDGenerator = uePreConfigPaths.PathIDGenerator defaultPath = c.Tunnel.DataPathPool.GetDefaultPath() - } else { - // UE has no pre-config path. + } else if c.Tunnel.DataPathPool.GetDefaultPath() == nil { + // UE has no pre-config path and default path // Use default route - c.Log.Infof("Has no pre-config route") + c.Log.Infof("Has no pre-config route. Has no default path") defaultUPPath := GetUserPlaneInformation().GetDefaultUserPlanePathByDNNAndUPF( c.SelectionParam, c.SelectedUPF) defaultPath = GenerateDataPath(defaultUPPath) @@ -588,18 +667,26 @@ func (c *SMContext) SelectDefaultDataPath() error { defaultPath.IsDefaultPath = true c.Tunnel.AddDataPath(defaultPath) } + } else { + c.Log.Infof("Has no pre-config route. 
Has default path") + defaultPath = c.Tunnel.DataPathPool.GetDefaultPath() } if defaultPath == nil { - return fmt.Errorf("Data Path not found, Selection Parameter: %s", + return fmt.Errorf("data path not found, Selection Parameter: %s", c.SelectionParam.String()) } - defaultPath.ActivateTunnelAndPDR(c, DefaultPrecedence) + + if !defaultPath.Activated { + defaultPath.ActivateTunnelAndPDR(c, DefaultPrecedence) + } + return nil } func (c *SMContext) CreatePccRuleDataPath(pccRule *PCCRule, tcData *TrafficControlData, qosData *models.QosData, + chgData *models.ChargingData, ) error { var targetRoute models.RouteToLocation if tcData != nil && len(tcData.RouteToLocs) > 0 { @@ -618,11 +705,25 @@ func (c *SMContext) CreatePccRuleDataPath(pccRule *PCCRule, if createdDataPath == nil { return fmt.Errorf("fail to create data path for pcc rule[%s]", pccRule.PccRuleId) } + + // Try to use a default pcc rule as default data path + if c.Tunnel.DataPathPool.GetDefaultPath() == nil && + pccRule.Precedence == 255 { + createdDataPath.IsDefaultPath = true + } + createdDataPath.GBRFlow = isGBRFlow(qosData) createdDataPath.ActivateTunnelAndPDR(c, uint32(pccRule.Precedence)) c.Tunnel.AddDataPath(createdDataPath) pccRule.Datapath = createdDataPath pccRule.AddDataPathForwardingParameters(c, &targetRoute) + + if chgLevel, err := pccRule.IdentifyChargingLevel(); err != nil { + c.Log.Warnf("fail to identify charging level[%+v] for pcc rule[%s]", err, pccRule.PccRuleId) + } else { + pccRule.Datapath.AddChargingRules(c, chgLevel, chgData) + } + pccRule.Datapath.AddQoS(c, pccRule.QFI, qosData) c.AddQosFlow(pccRule.QFI, qosData) return nil @@ -810,15 +911,6 @@ func (smContext *SMContext) IsAllowedPDUSessionType(requestedPDUSessionType uint return nil } -func (smContext *SMContext) GetUrrTypeById(urrId uint32) (UrrType, error) { - for urrType, id := range smContext.UrrIdMap { - if id == urrId { - return urrType, nil - } - } - return NOT_FOUND_URR, fmt.Errorf("Urr type not found ") -} - func (smContext *SMContext) StopT3591() { if smContext.T3591 != nil { smContext.T3591.Stop() diff --git a/internal/context/sm_context_policy.go b/internal/context/sm_context_policy.go index 4df99329..8361ee68 100644 --- a/internal/context/sm_context_policy.go +++ b/internal/context/sm_context_policy.go @@ -5,6 +5,7 @@ import ( "reflect" "github.com/free5gc/openapi/models" + "github.com/free5gc/smf/internal/logger" "github.com/free5gc/smf/pkg/factory" ) @@ -68,6 +69,57 @@ func (c *SMContext) RemoveQosFlow(qfi uint8) { delete(c.AdditonalQosFlows, qfi) } +// For urr that created for Pdu session level charging, it shall be applied to all data path +func (c *SMContext) addPduLevelChargingRuleToFlow(pccRules map[string]*PCCRule) { + var pduLevelChargingUrrs []*URR + + // First, select charging URRs from pcc rule, which charging level is PDU Session level + for id, pcc := range pccRules { + if chargingLevel, err := pcc.IdentifyChargingLevel(); err != nil { + continue + } else if chargingLevel == PduSessionCharging { + pduPcc := pccRules[id] + pduLevelChargingUrrs = pduPcc.Datapath.GetChargingUrr(c) + break + } + } + + for _, flowPcc := range pccRules { + if chgLevel, err := flowPcc.IdentifyChargingLevel(); err != nil { + continue + } else if chgLevel == FlowCharging { + for node := flowPcc.Datapath.FirstDPNode; node != nil; node = node.Next() { + if node.IsAnchorUPF() { + // only the traffic on the PSA UPF will be charged + if node.UpLinkTunnel != nil && node.UpLinkTunnel.PDR != nil { + node.UpLinkTunnel.PDR.AppendURRs(pduLevelChargingUrrs) + } + if 
node.DownLinkTunnel != nil && node.DownLinkTunnel.PDR != nil { + node.DownLinkTunnel.PDR.AppendURRs(pduLevelChargingUrrs) + } + } + } + } + } + + defaultPath := c.Tunnel.DataPathPool.GetDefaultPath() + if defaultPath == nil { + logger.CtxLog.Errorln("No default data path") + return + } + + for node := defaultPath.FirstDPNode; node != nil; node = node.Next() { + if node.IsAnchorUPF() { + if node.UpLinkTunnel != nil && node.UpLinkTunnel.PDR != nil { + node.UpLinkTunnel.PDR.AppendURRs(pduLevelChargingUrrs) + } + if node.DownLinkTunnel != nil && node.DownLinkTunnel.PDR != nil { + node.DownLinkTunnel.PDR.AppendURRs(pduLevelChargingUrrs) + } + } + } +} + func (c *SMContext) ApplyPccRules( decision *models.SmPolicyDecision, ) error { @@ -78,7 +130,7 @@ func (c *SMContext) ApplyPccRules( finalPccRules := make(map[string]*PCCRule) finalTcDatas := make(map[string]*TrafficControlData) finalQosDatas := make(map[string]*models.QosData) - + finalChgDatas := make(map[string]*models.ChargingData) // Handle QoSData for id, qos := range decision.QosDecs { if qos == nil { @@ -106,12 +158,15 @@ func (c *SMContext) ApplyPccRules( tgtTcID := tgtPcc.RefTcDataID() _, tgtTcData = c.getSrcTgtTcData(decision.TraffContDecs, tgtTcID) + tgtChgID := tgtPcc.RefChgDataID() + _, tgtChgData := c.getSrcTgtChgData(decision.ChgDecs, tgtChgID) + tgtQosID := tgtPcc.RefQosDataID() _, tgtQosData := c.getSrcTgtQosData(decision.QosDecs, tgtQosID) tgtPcc.SetQFI(c.AssignQFI(tgtQosID)) // Create Data path for targetPccRule - if err := c.CreatePccRuleDataPath(tgtPcc, tgtTcData, tgtQosData); err != nil { + if err := c.CreatePccRuleDataPath(tgtPcc, tgtTcData, tgtQosData, tgtChgData); err != nil { return err } if srcPcc != nil { @@ -133,7 +188,7 @@ func (c *SMContext) ApplyPccRules( finalQosDatas[tgtQosID] = tgtQosData } } - if err := checkUpPathChgEvent(c, srcTcData, tgtTcData); err != nil { + if err := checkUpPathChangeEvt(c, srcTcData, tgtTcData); err != nil { c.Log.Warnf("Check UpPathChgEvent err: %v", err) } // Remove handled pcc rule @@ -145,18 +200,22 @@ func (c *SMContext) ApplyPccRules( tcID := pcc.RefTcDataID() srcTcData, tgtTcData := c.getSrcTgtTcData(decision.TraffContDecs, tcID) + chgID := pcc.RefChgDataID() + srcChgData, tgtChgData := c.getSrcTgtChgData(decision.ChgDecs, chgID) + qosID := pcc.RefQosDataID() srcQosData, tgtQosData := c.getSrcTgtQosData(decision.QosDecs, qosID) if !reflect.DeepEqual(srcTcData, tgtTcData) || - !reflect.DeepEqual(srcQosData, tgtQosData) { + !reflect.DeepEqual(srcQosData, tgtQosData) || + !reflect.DeepEqual(srcChgData, tgtChgData) { // Remove old Data path c.PreRemoveDataPath(pcc.Datapath) // Create new Data path - if err := c.CreatePccRuleDataPath(pcc, tgtTcData, tgtQosData); err != nil { + if err := c.CreatePccRuleDataPath(pcc, tgtTcData, tgtQosData, tgtChgData); err != nil { return err } - if err := checkUpPathChgEvent(c, srcTcData, tgtTcData); err != nil { + if err := checkUpPathChangeEvt(c, srcTcData, tgtTcData); err != nil { c.Log.Warnf("Check UpPathChgEvent err: %v", err) } } @@ -167,11 +226,18 @@ func (c *SMContext) ApplyPccRules( if qosID != "" { finalQosDatas[qosID] = tgtQosData } + if chgID != "" { + finalChgDatas[chgID] = tgtChgData + } } + // For PCC rule that is for Pdu session level charging, add the created session rules to all other flow + // so that all volume in the Pdu session could be recorded and charged for the Pdu session + c.addPduLevelChargingRuleToFlow(finalPccRules) c.PCCRules = finalPccRules c.TrafficControlDatas = finalTcDatas c.QosDatas = finalQosDatas + 
c.ChargingData = finalChgDatas return nil } @@ -192,6 +258,23 @@ func (c *SMContext) getSrcTgtTcData( return srcTcData, tgtTcData } +func (c *SMContext) getSrcTgtChgData( + decisionChgDecs map[string]*models.ChargingData, + chgID string, +) (*models.ChargingData, *models.ChargingData) { + if chgID == "" { + return nil, nil + } + + srcChgData := c.ChargingData[chgID] + tgtChgData := decisionChgDecs[chgID] + if tgtChgData == nil { + // no TcData in decision, use source TcData as target TcData + tgtChgData = srcChgData + } + return srcChgData, tgtChgData +} + func (c *SMContext) getSrcTgtQosData( decisionQosDecs map[string]*models.QosData, qosID string, @@ -266,7 +349,7 @@ func applyFlowInfoOrPFD(pcc *PCCRule) error { return nil } -func checkUpPathChgEvent(c *SMContext, +func checkUpPathChangeEvt(c *SMContext, srcTcData, tgtTcData *TrafficControlData, ) error { var srcRoute, tgtRoute models.RouteToLocation diff --git a/internal/context/sm_context_policy_test.go b/internal/context/sm_context_policy_test.go index a04030d3..a43d3f64 100644 --- a/internal/context/sm_context_policy_test.go +++ b/internal/context/sm_context_policy_test.go @@ -98,11 +98,18 @@ var testConfig = factory.Config{ Description: "SMF procdeure test configuration", }, Configuration: &factory.Configuration{ + Sbi: &factory.Sbi{ + Scheme: "http", + RegisterIPv4: "127.0.0.1", + BindingIPv4: "127.0.0.1", + Port: 8000, + }, UserPlaneInformation: userPlaneConfig, }, } func initConfig() { + InitSmfContext(&testConfig) factory.SmfConfig = &testConfig } diff --git a/internal/context/upf.go b/internal/context/upf.go index 8b11f475..ead327ac 100644 --- a/internal/context/upf.go +++ b/internal/context/upf.go @@ -109,6 +109,14 @@ type UPFInterfaceInfo struct { EndpointFQDN string } +func GetUpfById(uuid string) *UPF { + upf, ok := upfPool.Load(uuid) + if ok { + return upf.(*UPF) + } + return nil +} + // NewUPFInterfaceInfo parse the InterfaceUpfInfoItem to generate UPFInterfaceInfo func NewUPFInterfaceInfo(i *factory.InterfaceUpfInfoItem) *UPFInterfaceInfo { interfaceInfo := new(UPFInterfaceInfo) @@ -542,8 +550,6 @@ func (upf *UPF) AddURR(urrId uint32, opts ...UrrOpt) (*URR, error) { urr := new(URR) urr.MeasureMethod = MesureMethodVol urr.MeasurementInformation = MeasureInformation(true, false) - urr.ReportingTrigger.Perio = true - urr.ReportingTrigger.Volth = true for _, opt := range opts { opt(urr) diff --git a/internal/context/user_plane_information.go b/internal/context/user_plane_information.go index 0a2e7772..b76c3296 100644 --- a/internal/context/user_plane_information.go +++ b/internal/context/user_plane_information.go @@ -761,7 +761,7 @@ func getPathBetween(cur *UPNode, dest *UPNode, visited map[*UPNode]bool, path = make([]*UPNode, 0) path = append(path, cur) pathExist = true - return + return path, pathExist } selectedSNssai := selection.SNssai @@ -773,16 +773,14 @@ func getPathBetween(cur *UPNode, dest *UPNode, visited map[*UPNode]bool, continue } - path_tail, path_exist := getPathBetween(node, dest, visited, selection) + path_tail, pathExist := getPathBetween(node, dest, visited, selection) - if path_exist { + if pathExist { path = make([]*UPNode, 0) path = append(path, cur) - path = append(path, path_tail...) 
- pathExist = true - return + return path, pathExist } } } diff --git a/internal/logger/logger.go b/internal/logger/logger.go index 2acaf1e8..7e1fce33 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -23,6 +23,7 @@ var ( GsmLog *logrus.Entry PfcpLog *logrus.Entry PduSessLog *logrus.Entry + ChargingLog *logrus.Entry UtilLog *logrus.Entry ) @@ -43,5 +44,6 @@ func init() { GsmLog = NfLog.WithField(logger_util.FieldCategory, "GSM") PfcpLog = NfLog.WithField(logger_util.FieldCategory, "PFCP") PduSessLog = NfLog.WithField(logger_util.FieldCategory, "PduSess") + ChargingLog = NfLog.WithField(logger_util.FieldCategory, "Charging") UtilLog = NfLog.WithField(logger_util.FieldCategory, "Util") } diff --git a/internal/pfcp/handler/handler.go b/internal/pfcp/handler/handler.go index 109a1208..47418290 100644 --- a/internal/pfcp/handler/handler.go +++ b/internal/pfcp/handler/handler.go @@ -10,6 +10,7 @@ import ( smf_context "github.com/free5gc/smf/internal/context" "github.com/free5gc/smf/internal/logger" pfcp_message "github.com/free5gc/smf/internal/pfcp/message" + "github.com/free5gc/smf/internal/sbi/producer" ) func HandlePfcpHeartbeatRequest(msg *pfcpUdp.Message) { @@ -195,54 +196,14 @@ func HandlePfcpSessionReportRequest(msg *pfcpUdp.Message) { } if req.ReportType.Usar && req.UsageReport != nil { - HandleReports(req.UsageReport, nil, nil, smContext, upfNodeID) + smContext.HandleReports(req.UsageReport, nil, nil, upfNodeID, "") + // After receiving the Usage Report, it should send charging request to the CHF + // and update the URR with the quota or other charging information according to + // the charging response + producer.ReportUsageAndUpdateQuota(smContext) } // TS 23.502 4.2.3.3 2b. Send Data Notification Ack, SMF->UPF cause.CauseValue = pfcpType.CauseRequestAccepted pfcp_message.SendPfcpSessionReportResponse(msg.RemoteAddr, cause, seqFromUPF, remoteSEID) } - -func HandleReports( - UsageReportReport []*pfcp.UsageReportPFCPSessionReportRequest, - UsageReportModification []*pfcp.UsageReportPFCPSessionModificationResponse, - UsageReportDeletion []*pfcp.UsageReportPFCPSessionDeletionResponse, - smContext *smf_context.SMContext, - nodeId pfcpType.NodeID, -) { - var usageReport smf_context.UsageReport - - for _, report := range UsageReportReport { - usageReport.UrrId = report.URRID.UrrIdValue - usageReport.TotalVolume = report.VolumeMeasurement.TotalVolume - usageReport.UplinkVolume = report.VolumeMeasurement.UplinkVolume - usageReport.DownlinkVolume = report.VolumeMeasurement.DownlinkVolume - usageReport.TotalPktNum = report.VolumeMeasurement.TotalPktNum - usageReport.UplinkPktNum = report.VolumeMeasurement.UplinkPktNum - usageReport.DownlinkPktNum = report.VolumeMeasurement.DownlinkPktNum - - smContext.UrrReports = append(smContext.UrrReports, usageReport) - } - for _, report := range UsageReportModification { - usageReport.UrrId = report.URRID.UrrIdValue - usageReport.TotalVolume = report.VolumeMeasurement.TotalVolume - usageReport.UplinkVolume = report.VolumeMeasurement.UplinkVolume - usageReport.DownlinkVolume = report.VolumeMeasurement.DownlinkVolume - usageReport.TotalPktNum = report.VolumeMeasurement.TotalPktNum - usageReport.UplinkPktNum = report.VolumeMeasurement.UplinkPktNum - usageReport.DownlinkPktNum = report.VolumeMeasurement.DownlinkPktNum - - smContext.UrrReports = append(smContext.UrrReports, usageReport) - } - for _, report := range UsageReportDeletion { - usageReport.UrrId = report.URRID.UrrIdValue - usageReport.TotalVolume = 
report.VolumeMeasurement.TotalVolume - usageReport.UplinkVolume = report.VolumeMeasurement.UplinkVolume - usageReport.DownlinkVolume = report.VolumeMeasurement.DownlinkVolume - usageReport.TotalPktNum = report.VolumeMeasurement.TotalPktNum - usageReport.UplinkPktNum = report.VolumeMeasurement.UplinkPktNum - usageReport.DownlinkPktNum = report.VolumeMeasurement.DownlinkPktNum - - smContext.UrrReports = append(smContext.UrrReports, usageReport) - } -} diff --git a/internal/pfcp/message/build.go b/internal/pfcp/message/build.go index 73313ce6..8ddbbb78 100644 --- a/internal/pfcp/message/build.go +++ b/internal/pfcp/message/build.go @@ -200,6 +200,13 @@ func urrToCreateURR(urr *context.URR) *pfcp.CreateURR { MeasurementPeriod: uint32(urr.MeasurementPeriod / time.Second), } } + if !urr.QuotaValidityTime.IsZero() { + createURR.QuotaValidityTime = &pfcpType.QuotaValidityTime{ + QuotaValidityTime: uint32(urr.QuotaValidityTime.Sub( + time.Date(1900, time.January, 1, 0, 0, 0, 0, time.UTC)) / 1000000000), + } + } + if urr.VolumeThreshold != 0 { createURR.VolumeThreshold = &pfcpType.VolumeThreshold{ Dlvol: true, @@ -208,6 +215,17 @@ func urrToCreateURR(urr *context.URR) *pfcp.CreateURR { UplinkVolume: urr.VolumeThreshold, } } + if urr.VolumeQuota != 0 { + createURR.VolumeQuota = &pfcpType.VolumeQuota{ + Tovol: true, + Dlvol: true, + Ulvol: true, + TotalVolume: urr.VolumeQuota, + DownlinkVolume: urr.VolumeQuota, + UplinkVolume: urr.VolumeQuota, + } + } + createURR.MeasurementInformation = &urr.MeasurementInformation return createURR @@ -249,6 +267,14 @@ func pdrToUpdatePDR(pdr *context.PDR) *pfcp.UpdatePDR { FarIdValue: pdr.FAR.FARID, } + for _, urr := range pdr.URR { + if urr != nil { + updatePDR.URRID = append(updatePDR.URRID, &pfcpType.URRID{ + UrrIdValue: urr.URRID, + }) + } + } + return updatePDR } @@ -290,6 +316,57 @@ func farToUpdateFAR(far *context.FAR) *pfcp.UpdateFAR { return updateFAR } +func urrToUpdateURR(urr *context.URR) *pfcp.UpdateURR { + updateURR := new(pfcp.UpdateURR) + + updateURR.URRID = &pfcpType.URRID{ + UrrIdValue: urr.URRID, + } + updateURR.MeasurementMethod = &pfcpType.MeasurementMethod{} + switch urr.MeasureMethod { + case context.MesureMethodVol: + updateURR.MeasurementMethod.Volum = true + case context.MesureMethodTime: + updateURR.MeasurementMethod.Durat = true + } + updateURR.ReportingTriggers = &urr.ReportingTrigger + if urr.MeasurementPeriod != 0 { + updateURR.MeasurementPeriod = &pfcpType.MeasurementPeriod{ + MeasurementPeriod: uint32(urr.MeasurementPeriod / time.Second), + } + } + if urr.QuotaValidityTime.IsZero() { + updateURR.QuotaValidityTime = &pfcpType.QuotaValidityTime{ + QuotaValidityTime: uint32(urr.QuotaValidityTime.Sub( + time.Date(1900, time.January, 1, 0, 0, 0, 0, time.UTC)) / 1000000000), + } + } + + if urr.VolumeThreshold != 0 { + updateURR.VolumeThreshold = &pfcpType.VolumeThreshold{ + Dlvol: true, + Ulvol: true, + Tovol: true, + TotalVolume: urr.VolumeThreshold, + DownlinkVolume: urr.VolumeThreshold, + UplinkVolume: urr.VolumeThreshold, + } + } + if urr.VolumeQuota != 0 { + updateURR.VolumeQuota = &pfcpType.VolumeQuota{ + Tovol: true, + Dlvol: true, + Ulvol: true, + TotalVolume: urr.VolumeQuota, + DownlinkVolume: urr.VolumeQuota, + UplinkVolume: urr.VolumeQuota, + } + } + updateURR.MeasurementInformation = &urr.MeasurementInformation + + return updateURR +} + func BuildPfcpSessionEstablishmentRequest( upNodeID pfcpType.NodeID, upN4Addr string, @@ -427,6 +504,7 @@ func BuildPfcpSessionModificationRequest( msg.UpdatePDR = make([]*pfcp.UpdatePDR, 0, 2) 
msg.UpdateFAR = make([]*pfcp.UpdateFAR, 0, 2) + msg.UpdateURR = make([]*pfcp.UpdateURR, 0, 12) nodeIDtoIP := upNodeID.ResolveNodeIdToIp().String() @@ -490,6 +568,20 @@ func BuildPfcpSessionModificationRequest( switch urr.State { case context.RULE_INITIAL: msg.CreateURR = append(msg.CreateURR, urrToCreateURR(urr)) + case context.RULE_UPDATE: + msg.UpdateURR = append(msg.UpdateURR, urrToUpdateURR(urr)) + case context.RULE_REMOVE: + msg.RemoveURR = append(msg.RemoveURR, &pfcp.RemoveURR{ + URRID: &pfcpType.URRID{ + UrrIdValue: urr.URRID, + }, + }) + case context.RULE_QUERY: + msg.QueryURR = append(msg.QueryURR, &pfcp.QueryURR{ + URRID: &pfcpType.URRID{ + UrrIdValue: urr.URRID, + }, + }) } urr.State = context.RULE_CREATE } diff --git a/internal/pfcp/message/send.go b/internal/pfcp/message/send.go index c891cf35..c92a5fad 100644 --- a/internal/pfcp/message/send.go +++ b/internal/pfcp/message/send.go @@ -78,7 +78,7 @@ func SendPfcpAssociationReleaseRequest(upNodeID pfcpType.NodeID) (resMsg *pfcpUd pfcpMsg, err := BuildPfcpAssociationReleaseRequest() if err != nil { logger.PfcpLog.Errorf("Build PFCP Association Release Request failed: %v", err) - return + return nil, err } message := &pfcp.Message{ @@ -148,7 +148,7 @@ func SendPfcpSessionEstablishmentRequest( ctx, pdrList, farList, barList, qerList, urrList) if err != nil { logger.PfcpLog.Errorf("Build PFCP Session Establishment Request failed: %v", err) - return + return nil, err } message := &pfcp.Message{ @@ -231,7 +231,7 @@ func SendPfcpSessionModificationRequest( ctx, pdrList, farList, barList, qerList, urrList) if err != nil { logger.PfcpLog.Errorf("Build PFCP Session Modification Request failed: %v", err) - return + return nil, err } seqNum := getSeqNumber() @@ -305,7 +305,7 @@ func SendPfcpSessionDeletionRequest(upf *context.UPF, ctx *context.SMContext) (r pfcpMsg, err := BuildPfcpSessionDeletionRequest() if err != nil { logger.PfcpLog.Errorf("Build PFCP Session Deletion Request failed: %v", err) - return + return nil, err } seqNum := getSeqNumber() remoteSEID := ctx.PFCPContext[nodeIDtoIP.String()].RemoteSEID diff --git a/internal/sbi/callback/api_default.go b/internal/sbi/callback/api_default.go index 3fea413f..4e168e6e 100644 --- a/internal/sbi/callback/api_default.go +++ b/internal/sbi/callback/api_default.go @@ -11,6 +11,7 @@ package callback import ( "net/http" + "strings" "github.com/gin-gonic/gin" @@ -60,3 +61,38 @@ func HTTPSmPolicyUpdateNotification(c *gin.Context) { func SmPolicyControlTerminationRequestNotification(c *gin.Context) { c.JSON(http.StatusOK, gin.H{}) } + +func HTTPChargingNotification(c *gin.Context) { + var req models.ChargingNotifyRequest + + requestBody, err := c.GetRawData() + if err != nil { + logger.PduSessLog.Errorln("GetRawData failed") + } + + err = openapi.Deserialize(&req, requestBody, "application/json") + if err != nil { + logger.PduSessLog.Errorln("Deserialize request failed") + } + + reqWrapper := httpwrapper.NewRequest(c.Request, req) + reqWrapper.Params["notifyUri"] = c.Params.ByName("notifyUri") + smContextRef := strings.Split(reqWrapper.Params["notifyUri"], "_")[1] + + HTTPResponse := producer.HandleChargingNotification(reqWrapper.Body.(models.ChargingNotifyRequest), smContextRef) + + for key, val := range HTTPResponse.Header { + c.Header(key, val[0]) + } + + resBody, err := openapi.Serialize(HTTPResponse.Body, "application/json") + if err != nil { + logger.PduSessLog.Errorln("Serialize failed") + } + + _, err = c.Writer.Write(resBody) + if err != nil { + logger.PduSessLog.Errorln("Write failed") 
+ } + c.Status(HTTPResponse.Status) +} diff --git a/internal/sbi/callback/routers.go b/internal/sbi/callback/routers.go index 745391f2..4d318d32 100644 --- a/internal/sbi/callback/routers.go +++ b/internal/sbi/callback/routers.go @@ -70,4 +70,10 @@ var routes = Routes{ "/sm-policies/:smContextRef/terminate", SmPolicyControlTerminationRequestNotification, }, + { + "ChargingNotification", + "POST", + "/:notifyUri", + HTTPChargingNotification, + }, } diff --git a/internal/sbi/consumer/converged_charging.go b/internal/sbi/consumer/converged_charging.go new file mode 100644 index 00000000..e7980823 --- /dev/null +++ b/internal/sbi/consumer/converged_charging.go @@ -0,0 +1,117 @@ +package consumer + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/free5gc/nas/nasConvert" + "github.com/free5gc/openapi" + "github.com/free5gc/openapi/models" + smf_context "github.com/free5gc/smf/internal/context" + "github.com/free5gc/smf/internal/logger" +) + +func buildConvergedChargingRequest(smContext *smf_context.SMContext, + multipleUnitUsage []models.MultipleUnitUsage, +) *models.ChargingDataRequest { + var triggers []models.Trigger + + smfSelf := smf_context.GetSelf() + date := time.Now() + + for _, unitUsage := range multipleUnitUsage { + for _, usedUnit := range unitUsage.UsedUnitContainer { + triggers = append(triggers, usedUnit.Triggers...) + } + } + + req := &models.ChargingDataRequest{ + ChargingId: smContext.ChargingID, + SubscriberIdentifier: smContext.Supi, + NfConsumerIdentification: &models.NfIdentification{ + NodeFunctionality: models.NodeFunctionality_SMF, + NFName: smfSelf.Name, + // not sure if NFIPv4Address is RegisterIPv4 or BindingIPv4 + NFIPv4Address: smfSelf.RegisterIPv4, + }, + InvocationTimeStamp: &date, + Triggers: triggers, + PDUSessionChargingInformation: &models.PduSessionChargingInformation{ + ChargingId: smContext.ChargingID, + UserInformation: &models.UserInformation{ + ServedGPSI: smContext.Gpsi, + ServedPEI: smContext.Pei, + }, + PduSessionInformation: &models.PduSessionInformation{ + PduSessionID: smContext.PDUSessionID, + NetworkSlicingInfo: &models.NetworkSlicingInfo{ + SNSSAI: smContext.SNssai, + }, + + PduType: nasConvert.PDUSessionTypeToModels(smContext.SelectedPDUSessionType), + ServingNetworkFunctionID: &models.ServingNetworkFunctionId{ + ServingNetworkFunctionInformation: &models.NfIdentification{ + NodeFunctionality: models.NodeFunctionality_AMF, + }, + }, + DnnId: smContext.Dnn, + }, + }, + NotifyUri: fmt.Sprintf("%s://%s:%d/nsmf-callback/notify_%s", + smf_context.GetSelf().URIScheme, + smf_context.GetSelf().RegisterIPv4, + smf_context.GetSelf().SBIPort, + smContext.Ref, + ), + MultipleUnitUsage: multipleUnitUsage, + } + + return req +} + +func SendConvergedChargingRequest(smContext *smf_context.SMContext, requestType smf_context.RequestType, + multipleUnitUsage []models.MultipleUnitUsage, +) (*models.ChargingDataResponse, *models.ProblemDetails, error) { + logger.ChargingLog.Info("Handle SendConvergedChargingRequest") + + req := buildConvergedChargingRequest(smContext, multipleUnitUsage) + + var rsp models.ChargingDataResponse + var httpResponse *http.Response + var err error + + // select the appropriate converged charging service based on trigger type + switch requestType { + case smf_context.CHARGING_INIT: + rsp, httpResponse, err = smContext.ChargingClient.DefaultApi.ChargingdataPost(context.Background(), *req) + chargingDataRef := strings.Split(httpResponse.Header.Get("Location"), "/") + smContext.ChargingDataRef = 
chargingDataRef[len(chargingDataRef)-1]
+	case smf_context.CHARGING_UPDATE:
+		rsp, httpResponse, err = smContext.ChargingClient.DefaultApi.ChargingdataChargingDataRefUpdatePost(
+			context.Background(), smContext.ChargingDataRef, *req)
+	case smf_context.CHARGING_RELEASE:
+		httpResponse, err = smContext.ChargingClient.DefaultApi.ChargingdataChargingDataRefReleasePost(context.Background(),
+			smContext.ChargingDataRef, *req)
+	}
+
+	defer func() {
+		// httpResponse may be nil when the request itself failed; guard before closing the body
+		if httpResponse == nil {
+			return
+		}
+		if resCloseErr := httpResponse.Body.Close(); resCloseErr != nil {
+			logger.ChargingLog.Errorf("ChargingDataRequest response body cannot close: %+v", resCloseErr)
+		}
+	}()
+
+	if err == nil {
+		return &rsp, nil, nil
+	} else if httpResponse != nil {
+		if httpResponse.Status != err.Error() {
+			return nil, nil, err
+		}
+		problem := err.(openapi.GenericOpenAPIError).Model().(models.ProblemDetails)
+		return nil, &problem, nil
+	} else {
+		return nil, nil, openapi.ReportError("server no response")
+	}
+}
diff --git a/internal/sbi/producer/callback.go b/internal/sbi/producer/callback.go
index 38067c27..18adc46f 100644
--- a/internal/sbi/producer/callback.go
+++ b/internal/sbi/producer/callback.go
@@ -2,6 +2,7 @@ package producer
 
 import (
 	"context"
+	"fmt"
 	"net/http"
 
 	"github.com/free5gc/openapi/Nsmf_EventExposure"
@@ -11,6 +12,60 @@ import (
 	"github.com/free5gc/util/httpwrapper"
 )
 
+func HandleChargingNotification(chargingNotifyRequest models.ChargingNotifyRequest,
+	smContextRef string,
+) *httpwrapper.Response {
+	logger.ChargingLog.Info("Handle Charging Notification")
+
+	problemDetails := chargingNotificationProcedure(chargingNotifyRequest, smContextRef)
+	if problemDetails != nil {
+		return httpwrapper.NewResponse(int(problemDetails.Status), nil, problemDetails)
+	} else {
+		return httpwrapper.NewResponse(http.StatusNoContent, nil, nil)
+	}
+}
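The CHARGING_INIT branch above derives ChargingDataRef from the Location header before err is checked. A guarded sketch of that extraction, assuming the CHF returns the created resource URI (e.g. ".../chargingdata/<chargingDataRef>") in the Location header; the helper name is hypothetical and not part of this patch.

package consumer // illustrative placement only

import (
	"net/http"
	"strings"
)

// chargingDataRefFromLocation is a hypothetical helper: a guarded version of the
// Location-header parsing used in the CHARGING_INIT branch above.
func chargingDataRefFromLocation(rsp *http.Response) (string, bool) {
	if rsp == nil {
		return "", false
	}
	location := rsp.Header.Get("Location")
	if location == "" {
		return "", false
	}
	parts := strings.Split(location, "/")
	return parts[len(parts)-1], true
}
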
+
+// When the SMF receives a Charging Notification from the CHF, it sends updated Charging Information
+// to the CHF and updates the UPF. The CHF sends this notification when it detects changes to the quota file.
+func chargingNotificationProcedure(req models.ChargingNotifyRequest, smContextRef string) *models.ProblemDetails {
+	if smContext := smf_context.GetSMContextByRef(smContextRef); smContext != nil {
+		smContext.SMLock.Lock()
+		defer smContext.SMLock.Unlock()
+		upfUrrMap := make(map[string][]*smf_context.URR)
+		for _, reauthorizeDetail := range req.ReauthorizationDetails {
+			rg := reauthorizeDetail.RatingGroup
+			logger.ChargingLog.Infof("Force update charging information for rating group %d", rg)
+			for _, urr := range smContext.UrrUpfMap {
+				chgInfo := smContext.ChargingInfo[urr.URRID]
+				if chgInfo.RatingGroup == rg ||
+					chgInfo.ChargingLevel == smf_context.PduSessionCharging {
+					logger.ChargingLog.Tracef("Query URR (%d) for Rating Group (%d)", urr.URRID, rg)
+					upfId := smContext.ChargingInfo[urr.URRID].UpfId
+					upfUrrMap[upfId] = append(upfUrrMap[upfId], urr)
+				}
+			}
+		}
+		for upfId, urrList := range upfUrrMap {
+			upf := smf_context.GetUpfById(upfId)
+			if upf == nil {
+				logger.ChargingLog.Warnf("Could not find UPF %s", upfId)
+				continue
+			}
+			QueryReport(smContext, upf, urrList, models.TriggerType_FORCED_REAUTHORISATION)
+		}
+		ReportUsageAndUpdateQuota(smContext)
+	} else {
+		problemDetails := &models.ProblemDetails{
+			Status: http.StatusNotFound,
+			Cause:  "CONTEXT_NOT_FOUND",
+			Detail: fmt.Sprintf("SM Context [%s] Not Found", smContextRef),
+		}
+		return problemDetails
+	}
+
+	return nil
+}
+
 func HandleSMPolicyUpdateNotify(smContextRef string, request models.SmPolicyNotification) *httpwrapper.Response {
 	logger.PduSessLog.Infoln("In HandleSMPolicyUpdateNotify")
 	decision := request.SmPolicyDecision
diff --git a/internal/sbi/producer/charging_trigger.go b/internal/sbi/producer/charging_trigger.go
new file mode 100644
index 00000000..aecf2d72
--- /dev/null
+++ b/internal/sbi/producer/charging_trigger.go
@@ -0,0 +1,377 @@
+package producer
+
+import (
+	"time"
+
+	"github.com/free5gc/openapi/models"
+	"github.com/free5gc/pfcp"
+	"github.com/free5gc/pfcp/pfcpType"
+	smf_context "github.com/free5gc/smf/internal/context"
+	"github.com/free5gc/smf/internal/logger"
+	pfcp_message "github.com/free5gc/smf/internal/pfcp/message"
+	"github.com/free5gc/smf/internal/sbi/consumer"
+)
+
+func CreateChargingSession(smContext *smf_context.SMContext) {
+	_, problemDetails, err := consumer.SendConvergedChargingRequest(smContext, smf_context.CHARGING_INIT, nil)
+	if problemDetails != nil {
+		logger.ChargingLog.Errorf("Send Charging Data Request[Init] Failed Problem[%+v]", problemDetails)
+	} else if err != nil {
+		logger.ChargingLog.Errorf("Send Charging Data Request[Init] Error[%+v]", err)
+	} else {
+		logger.ChargingLog.Infof("Send Charging Data Request[Init] successfully")
+	}
+}
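For reference, a minimal sketch of the per-rating-group usage item that UpdateChargingSession (next) assembles for the CHF. Field names follow the models used in this patch; the quota management indicator and helper name are placeholders, not part of the patch.

package producer // illustrative placement only

import (
	"time"

	"github.com/free5gc/openapi/models"
)

// exampleUnitUsage is a hypothetical helper showing the shape of the per-rating-group usage
// sent to the CHF; the values are placeholders.
func exampleUnitUsage(rg int32, upfId string, trigger models.Trigger) models.MultipleUnitUsage {
	now := time.Now()
	return models.MultipleUnitUsage{
		RatingGroup: rg,
		UPFID:       upfId,
		UsedUnitContainer: []models.UsedUnitContainer{{
			QuotaManagementIndicator: models.QuotaManagementIndicator_ONLINE_CHARGING, // placeholder
			Triggers:                 []models.Trigger{trigger},
			TriggerTimestamp:         &now,
		}},
	}
}
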
+
+func UpdateChargingSession(smContext *smf_context.SMContext, urrList []*smf_context.URR, trigger models.Trigger) {
+	var multipleUnitUsage []models.MultipleUnitUsage
+
+	for _, urr := range urrList {
+		if chgInfo := smContext.ChargingInfo[urr.URRID]; chgInfo != nil {
+			rg := chgInfo.RatingGroup
+			logger.PduSessLog.Tracef("Receive Usage Report from URR[%d], corresponding Rating Group[%d], ChargingMethod %v",
+				urr.URRID, rg, chgInfo.ChargingMethod)
+			triggerTime := time.Now()
+
+			uu := models.UsedUnitContainer{
+				QuotaManagementIndicator: chgInfo.ChargingMethod,
+				Triggers:                 []models.Trigger{trigger},
+				TriggerTimestamp:         &triggerTime,
+			}
+
+			muu := models.MultipleUnitUsage{
+				RatingGroup:       rg,
+				UPFID:             chgInfo.UpfId,
+				UsedUnitContainer: []models.UsedUnitContainer{uu},
+			}
+
+			multipleUnitUsage = append(multipleUnitUsage, muu)
+		}
+	}
+
+	_, problemDetails, err := consumer.SendConvergedChargingRequest(smContext,
+		smf_context.CHARGING_UPDATE, multipleUnitUsage)
+	if problemDetails != nil {
+		logger.ChargingLog.Errorf("Send Charging Data Request[Update] Failed Problem[%+v]", problemDetails)
+	} else if err != nil {
+		logger.ChargingLog.Errorf("Send Charging Data Request[Update] Error[%+v]", err)
+	} else {
+		logger.ChargingLog.Infof("Send Charging Data Request[Update] successfully")
+	}
+}
+
+func ReleaseChargingSession(smContext *smf_context.SMContext) {
+	multipleUnitUsage := buildMultiUnitUsageFromUsageReport(smContext)
+
+	_, problemDetails, err := consumer.SendConvergedChargingRequest(smContext,
+		smf_context.CHARGING_RELEASE, multipleUnitUsage)
+	if problemDetails != nil {
+		logger.ChargingLog.Errorf("Send Charging Data Request[Termination] Failed Problem[%+v]", problemDetails)
+	} else if err != nil {
+		logger.ChargingLog.Errorf("Send Charging Data Request[Termination] Error[%+v]", err)
+	} else {
+		logger.ChargingLog.Infof("Send Charging Data Request[Termination] successfully")
+	}
+}
+
+// Report the usage reports to the CHF and update the URRs with the charging information in the charging response
+func ReportUsageAndUpdateQuota(smContext *smf_context.SMContext) {
+	multipleUnitUsage := buildMultiUnitUsageFromUsageReport(smContext)
+
+	if len(multipleUnitUsage) != 0 {
+		rsp, problemDetails, err := consumer.SendConvergedChargingRequest(smContext,
+			smf_context.CHARGING_UPDATE, multipleUnitUsage)
+
+		if problemDetails != nil {
+			logger.ChargingLog.Errorf("Send Charging Data Request[Update] Failed Problem[%+v]", problemDetails)
+		} else if err != nil {
+			logger.ChargingLog.Errorf("Send Charging Data Request[Update] Error[%+v]", err)
+		} else {
+			var pfcpResponseStatus smf_context.PFCPSessionResponseStatus
+
+			upfUrrMap := make(map[string][]*smf_context.URR)
+
+			logger.ChargingLog.Infof("Send Charging Data Request[Update] successfully")
+			smContext.SetState(smf_context.PFCPModification)
+
+			updateGrantedQuota(smContext, rsp.MultipleUnitInformation)
+			// Usually only the anchor UPF needs to be updated
+			for _, urr := range smContext.UrrUpfMap {
+				upfId := smContext.ChargingInfo[urr.URRID].UpfId
+
+				if urr.State == smf_context.RULE_UPDATE {
+					upfUrrMap[upfId] = append(upfUrrMap[upfId], urr)
+				}
+			}
+
+			if len(upfUrrMap) == 0 {
+				logger.ChargingLog.Infof("No URR needs to update charging information")
+				return
+			}
+
+			for upfId, urrList := range upfUrrMap {
+				upf := smf_context.GetUpfById(upfId)
+				if upf == nil {
+					logger.PduSessLog.Warnf("Could not find UPF %s", upfId)
+					continue
+				}
+				rcvMsg, err := pfcp_message.SendPfcpSessionModificationRequest(
+					upf, smContext, nil, nil, nil, nil, urrList)
+				if err != nil {
+					logger.PduSessLog.Warnf("Sending PFCP Session Modification Request to AN UPF error: %+v", err)
+					pfcpResponseStatus = smf_context.SessionUpdateFailed
+				} else {
+					logger.PduSessLog.Infoln("Received PFCP Session Modification Response")
+					pfcpResponseStatus = smf_context.SessionUpdateSuccess
+				}
+
+				rsp := rcvMsg.PfcpMessage.Body.(pfcp.PFCPSessionModificationResponse)
+				if rsp.Cause == nil || rsp.Cause.CauseValue != pfcpType.CauseRequestAccepted {
+					logger.PduSessLog.Warn("Received PFCP Session Modification Not Accepted Response from AN UPF")
+					pfcpResponseStatus = smf_context.SessionUpdateFailed
+				}
+
+				switch pfcpResponseStatus {
+				case smf_context.SessionUpdateSuccess:
+					logger.PfcpLog.Traceln("In case SessionUpdateSuccess")
+					smContext.SetState(smf_context.Active)
+				case
smf_context.SessionUpdateFailed: + logger.PfcpLog.Traceln("In case SessionUpdateFailed") + smContext.SetState(smf_context.Active) + } + } + } + } else { + logger.ChargingLog.Infof("No report need to be charged") + } +} + +func buildMultiUnitUsageFromUsageReport(smContext *smf_context.SMContext) []models.MultipleUnitUsage { + logger.ChargingLog.Infof("build MultiUnitUsageFromUsageReport") + + var ratingGroupUnitUsagesMap map[int32]models.MultipleUnitUsage + var multipleUnitUsage []models.MultipleUnitUsage + + ratingGroupUnitUsagesMap = make(map[int32]models.MultipleUnitUsage) + for _, ur := range smContext.UrrReports { + if ur.ReportTpye != "" { + var triggers []models.Trigger + + chgInfo := smContext.ChargingInfo[ur.UrrId] + if chgInfo == nil { + logger.PduSessLog.Tracef("URR %d is not for charging", ur.UrrId) + continue + } + + if chgInfo.ChargingLevel == smf_context.FlowCharging && ur.ReportTpye == models.TriggerType_VOLUME_LIMIT { + triggers = []models.Trigger{ + { + TriggerType: ur.ReportTpye, + TriggerCategory: models.TriggerCategory_DEFERRED_REPORT, + }, + } + } else { + triggers = []models.Trigger{ + { + TriggerType: ur.ReportTpye, + TriggerCategory: models.TriggerCategory_IMMEDIATE_REPORT, + }, + } + } + + rg := chgInfo.RatingGroup + logger.PduSessLog.Tracef("Receive Usage Report from URR[%d], correspopnding Rating Group[%d], ChargingMethod %v", + ur.UrrId, rg, chgInfo.ChargingMethod) + triggerTime := time.Now() + + uu := models.UsedUnitContainer{ + QuotaManagementIndicator: chgInfo.ChargingMethod, + Triggers: triggers, + TriggerTimestamp: &triggerTime, + DownlinkVolume: int32(ur.DownlinkVolume), + UplinkVolume: int32(ur.UplinkVolume), + TotalVolume: int32(ur.TotalVolume), + } + if unitUsage, ok := ratingGroupUnitUsagesMap[rg]; !ok { + requestUnit := &models.RequestedUnit{} + + // Only online charging should request unit + // offline charging is only for recording usage + switch chgInfo.ChargingMethod { + case models.QuotaManagementIndicator_ONLINE_CHARGING: + requestUnit = &models.RequestedUnit{ + TotalVolume: smContext.RequestedUnit, + DownlinkVolume: smContext.RequestedUnit, + UplinkVolume: smContext.RequestedUnit, + } + } + + ratingGroupUnitUsagesMap[rg] = models.MultipleUnitUsage{ + RatingGroup: rg, + UPFID: ur.UpfId, + UsedUnitContainer: []models.UsedUnitContainer{uu}, + RequestedUnit: requestUnit, + } + } else { + unitUsage.UsedUnitContainer = append(unitUsage.UsedUnitContainer, uu) + ratingGroupUnitUsagesMap[rg] = unitUsage + } + } else { + logger.PduSessLog.Tracef("Report for urr (%d) will not be charged", ur.UrrId) + } + } + + smContext.UrrReports = []smf_context.UsageReport{} + + for _, unitUsage := range ratingGroupUnitUsagesMap { + multipleUnitUsage = append(multipleUnitUsage, unitUsage) + } + + return multipleUnitUsage +} + +func getUrrByRg(smContext *smf_context.SMContext, upfId string, rg int32) *smf_context.URR { + for _, urr := range smContext.UrrUpfMap { + if smContext.ChargingInfo[urr.URRID] != nil && + smContext.ChargingInfo[urr.URRID].RatingGroup == rg && + smContext.ChargingInfo[urr.URRID].UpfId == upfId { + return urr + } + } + + return nil +} + +// Update the urr by the charging information renewed by chf +func updateGrantedQuota(smContext *smf_context.SMContext, multipleUnitInformation []models.MultipleUnitInformation) { + for _, ui := range multipleUnitInformation { + trigger := pfcpType.ReportingTriggers{} + + rg := ui.RatingGroup + upfId := ui.UPFID + + if urr := getUrrByRg(smContext, upfId, rg); urr != nil { + urr.State = smf_context.RULE_UPDATE + 
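buildMultiUnitUsageFromUsageReport above requests new units only for online charging; offline charging merely records the reported usage. A condensed sketch of that decision (helper name hypothetical, not part of this patch):

package producer // illustrative placement only

import "github.com/free5gc/openapi/models"

// requestedUnitFor is a hypothetical helper mirroring the requested-unit policy in
// buildMultiUnitUsageFromUsageReport: only online charging asks the CHF for new quota.
func requestedUnitFor(method models.QuotaManagementIndicator, requestedUnit int32) *models.RequestedUnit {
	if method != models.QuotaManagementIndicator_ONLINE_CHARGING {
		// offline charging only records usage; nothing is requested
		return &models.RequestedUnit{}
	}
	return &models.RequestedUnit{
		TotalVolume:    requestedUnit,
		DownlinkVolume: requestedUnit,
		UplinkVolume:   requestedUnit,
	}
}
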
chgInfo := smContext.ChargingInfo[urr.URRID]
+
+			for _, t := range ui.Triggers {
+				switch t.TriggerType {
+				case models.TriggerType_VOLUME_LIMIT:
+					// According to TS 32.255, the "Expiry of data volume limit" trigger has two reporting levels:
+					// at the PDU session level the report is an "Immediate report", i.e. it is sent to the CHF
+					// immediately, while at the Rating Group level it is a "Deferred report", i.e. it is sent to
+					// the CHF together with the next charging request that is triggered by an immediate-report
+					// trigger.
+
+					// TODO: Currently the CHF cannot identify the report level since it only knows the rating
+					// group, so both charging levels of "Expiry of data volume limit" are included in the report,
+					// and the report type is determined by the SMF.
+					switch chgInfo.ChargingLevel {
+					case smf_context.PduSessionCharging:
+						if t.TriggerCategory == models.TriggerCategory_IMMEDIATE_REPORT {
+							smContext.Log.Infof("Add Volume Limit Expiry Timer for PDU session, its rating group is [%d]", rg)
+
+							if chgInfo.VolumeLimitExpiryTimer != nil {
+								chgInfo.VolumeLimitExpiryTimer.Stop()
+								chgInfo.VolumeLimitExpiryTimer = nil
+							}
+
+							chgInfo.VolumeLimitExpiryTimer = smf_context.NewTimer(time.Duration(t.VolumeLimit)*time.Second, 1,
+								func(expireTimes int32) {
+									smContext.SMLock.Lock()
+									defer smContext.SMLock.Unlock()
+									urrList := []*smf_context.URR{urr}
+									upf := smf_context.GetUpfById(ui.UPFID)
+									if upf != nil {
+										QueryReport(smContext, upf, urrList, models.TriggerType_VOLUME_LIMIT)
+										ReportUsageAndUpdateQuota(smContext)
+									}
+								},
+								func() {
+									smContext.Log.Tracef("Volume Limit Expiry for PDU session, its rating group is [%d]", rg)
+									chgInfo.VolumeLimitExpiryTimer.Stop()
+									chgInfo.VolumeLimitExpiryTimer = nil
+								})
+						}
+					case smf_context.FlowCharging:
+						if t.TriggerCategory == models.TriggerCategory_DEFERRED_REPORT {
+							smContext.Log.Infof("Add Volume Limit Expiry Timer for rating group [%d]", rg)
+
+							if chgInfo.VolumeLimitExpiryTimer != nil {
+								chgInfo.VolumeLimitExpiryTimer.Stop()
+								chgInfo.VolumeLimitExpiryTimer = nil
+							}
+
+							chgInfo.VolumeLimitExpiryTimer = smf_context.NewTimer(time.Duration(t.VolumeLimit)*time.Second, 1,
+								func(expireTimes int32) {
+									smContext.SMLock.Lock()
+									defer smContext.SMLock.Unlock()
+									urrList := []*smf_context.URR{urr}
+									upf := smf_context.GetUpfById(ui.UPFID)
+									if upf != nil {
+										QueryReport(smContext, upf, urrList, models.TriggerType_VOLUME_LIMIT)
+									}
+								},
+								func() {
+									smContext.Log.Tracef("Volume Limit Expiry for rating group [%d]", rg)
+									chgInfo.VolumeLimitExpiryTimer.Stop()
+									chgInfo.VolumeLimitExpiryTimer = nil
+								})
+						}
+					}
+				case models.TriggerType_MAX_NUMBER_OF_CHANGES_IN_CHARGING_CONDITIONS:
+					switch chgInfo.ChargingLevel {
+					case smf_context.PduSessionCharging:
+						chgInfo.EventLimitExpiryTimer = smf_context.NewTimer(time.Duration(t.EventLimit)*time.Second, 1,
+							func(expireTimes int32) {
+								smContext.SMLock.Lock()
+								defer smContext.SMLock.Unlock()
+								urrList := []*smf_context.URR{urr}
+								upf := smf_context.GetUpfById(ui.UPFID)
+								if upf != nil {
+									QueryReport(smContext, upf, urrList, models.TriggerType_VOLUME_LIMIT)
+									ReportUsageAndUpdateQuota(smContext)
+								}
+							},
+							func() {
+								smContext.Log.Tracef("Event Limit Expiry Timer is triggered")
+								chgInfo.EventLimitExpiryTimer = nil
+							})
+					default:
+						smContext.Log.Tracef("MAX_NUMBER_OF_CHANGES_IN_CHARGING_CONDITIONS " +
+							"should only be applied to PDU session level charging")
+					}
+				case models.TriggerType_QUOTA_THRESHOLD:
+					if
ui.VolumeQuotaThreshold != 0 { + trigger.Volth = true + urr.VolumeThreshold = uint64(ui.VolumeQuotaThreshold) + } + // The difference between the quota validity time and the volume limit is + // that the validity time is counted by the UPF, the volume limit is counted by the SMF + case models.TriggerType_VALIDITY_TIME: + if ui.ValidityTime != 0 { + urr.ReportingTrigger.Quvti = true + urr.QuotaValidityTime = time.Now().Add(time.Second * time.Duration(ui.ValidityTime)) + } + case models.TriggerType_QUOTA_EXHAUSTED: + if chgInfo.ChargingMethod == models.QuotaManagementIndicator_ONLINE_CHARGING { + if ui.GrantedUnit != nil { + trigger.Volqu = true + urr.VolumeQuota = uint64(ui.GrantedUnit.TotalVolume) + } else { + // No granted quota, so set the urr.VolumeQuota to 0, upf should stop send traffic + logger.ChargingLog.Warnf("No granted quota") + trigger.Volqu = true + urr.VolumeQuota = 0 + } + } + } + } + + urr.ReportingTrigger = trigger + } else { + logger.PduSessLog.Warnf("Do not find charging Information for rating group[%d]\n", rg) + } + } +} diff --git a/internal/sbi/producer/datapath.go b/internal/sbi/producer/datapath.go index 167229b6..e6e2c4db 100644 --- a/internal/sbi/producer/datapath.go +++ b/internal/sbi/producer/datapath.go @@ -10,7 +10,6 @@ import ( "github.com/free5gc/pfcp/pfcpUdp" smf_context "github.com/free5gc/smf/internal/context" "github.com/free5gc/smf/internal/logger" - "github.com/free5gc/smf/internal/pfcp/handler" pfcp_message "github.com/free5gc/smf/internal/pfcp/message" ) @@ -89,7 +88,7 @@ func ActivateUPFSession( if !exist || sessionContext.RemoteSEID == 0 { go establishPfcpSession(smContext, pfcp, resChan) } else { - go modifyExistingPfcpSession(smContext, pfcp, resChan) + go modifyExistingPfcpSession(smContext, pfcp, resChan, "") } } @@ -97,6 +96,28 @@ func ActivateUPFSession( close(resChan) } +func QueryReport(smContext *smf_context.SMContext, upf *smf_context.UPF, + urrs []*smf_context.URR, reportResaon models.TriggerType, +) { + for _, urr := range urrs { + urr.State = smf_context.RULE_QUERY + } + + pfcpState := &PFCPState{ + upf: upf, + urrList: urrs, + } + + resChan := make(chan SendPfcpResult) + go modifyExistingPfcpSession(smContext, pfcpState, resChan, reportResaon) + pfcpResult := <-resChan + + if pfcpResult.Err != nil { + logger.PduSessLog.Errorf("Query URR Report by PFCP Session Mod Request fail: %v", pfcpResult.Err) + return + } +} + func establishPfcpSession(smContext *smf_context.SMContext, state *PFCPState, resCh chan<- SendPfcpResult, @@ -140,6 +161,7 @@ func modifyExistingPfcpSession( smContext *smf_context.SMContext, state *PFCPState, resCh chan<- SendPfcpResult, + reportResaon models.TriggerType, ) { logger.PduSessLog.Infoln("Sending PFCP Session Modification Request") @@ -165,7 +187,7 @@ func modifyExistingPfcpSession( if rsp.UsageReport != nil { SEID := rcvMsg.PfcpMessage.Header.SEID upfNodeID := smContext.GetNodeIDByLocalSEID(SEID) - handler.HandleReports(nil, rsp.UsageReport, nil, smContext, upfNodeID) + smContext.HandleReports(nil, rsp.UsageReport, nil, upfNodeID, reportResaon) } } else { resCh <- SendPfcpResult{ @@ -425,7 +447,7 @@ func deletePfcpSession(upf *smf_context.UPF, ctx *smf_context.SMContext, resCh c if rsp.UsageReport != nil { SEID := rcvMsg.PfcpMessage.Header.SEID upfNodeID := ctx.GetNodeIDByLocalSEID(SEID) - handler.HandleReports(nil, nil, rsp.UsageReport, ctx, upfNodeID) + ctx.HandleReports(nil, nil, rsp.UsageReport, upfNodeID, "") } } else { logger.PduSessLog.Warn("Received PFCP Session Deletion Not Accepted Response") diff 
--git a/internal/sbi/producer/gsm_handler_test.go b/internal/sbi/producer/gsm_handler_test.go index df977d68..61f6391b 100644 --- a/internal/sbi/producer/gsm_handler_test.go +++ b/internal/sbi/producer/gsm_handler_test.go @@ -30,7 +30,7 @@ func TestBuildNASPacketFilterFromPacketFilterInfo(t *testing.T) { }, flowInfo: models.FlowInformation{ FlowDirection: models.FlowDirectionRm_BIDIRECTIONAL, - FlowDescription: "permit out ip from any to any", + FlowDescription: "permit out ip from any to assigned", }, }, { diff --git a/internal/sbi/producer/pdu_session.go b/internal/sbi/producer/pdu_session.go index cd0ad298..80aa5cc2 100644 --- a/internal/sbi/producer/pdu_session.go +++ b/internal/sbi/producer/pdu_session.go @@ -188,6 +188,15 @@ func HandlePDUSessionSMContextCreate(isDone <-chan struct{}, } smContext.SMPolicyID = smPolicyID + // PDU session create is a charging event + logger.PduSessLog.Infof("CHF Selection for SMContext SUPI[%s] PDUSessionID[%d]\n", + smContext.Supi, smContext.PDUSessionID) + if err = smContext.CHFSelection(); err != nil { + logger.PduSessLog.Errorln("chf selection error:", err) + } else { + CreateChargingSession(smContext) + } + // Update SessionRule from decision if err = smContext.ApplySessionRules(smPolicyDecision); err != nil { smContext.Log.Errorf("PDUSessionSMContextCreate err: %v", err) @@ -196,7 +205,13 @@ func HandlePDUSessionSMContextCreate(isDone <-chan struct{}, &Nsmf_PDUSession.SubscriptionDenied) } - if err = smContext.SelectDefaultDataPath(); err != nil { + // If PCF prepares default Pcc Rule, SMF do not need to create defaultDataPath. + if err := smContext.ApplyPccRules(smPolicyDecision); err != nil { + smContext.Log.Errorf("apply sm policy decision error: %+v", err) + } + + // SelectDefaultDataPath() will create a default data path if default data path is not found. + if err := smContext.SelectDefaultDataPath(); err != nil { smContext.SetState(smf_context.InActive) smContext.Log.Errorf("PDUSessionSMContextCreate err: %v", err) return makeEstRejectResAndReleaseSMContext(smContext, @@ -204,20 +219,6 @@ func HandlePDUSessionSMContextCreate(isDone <-chan struct{}, &Nsmf_PDUSession.InsufficientResourceSliceDnn) } - if err = smContext.ApplyPccRules(smPolicyDecision); err != nil { - smContext.Log.Errorf("apply sm policy decision error: %+v", err) - } - - // UECM registration - problemDetails, err := consumer.UeCmRegistration(smContext) - if problemDetails != nil { - smContext.Log.Errorf("UECM_Registration Error: %+v", problemDetails) - } else if err != nil { - smContext.Log.Errorf("UECM_Registration Error: %+v", err) - } else { - smContext.Log.Traceln("UECM Registration Successful") - } - // generate goroutine to handle PFCP and // reply PDUSessionSMContextCreate rsp immediately needUnlock = false @@ -459,6 +460,24 @@ func HandlePDUSessionSMContextUpdate(smContextRef string, body models.UpdateSmCo smContext.SetState(smf_context.ModificationPending) response.JsonData.UpCnxState = models.UpCnxState_DEACTIVATED smContext.UpCnxState = body.JsonData.UpCnxState + // UE location change is a charging event + // TODO: This is not tested yet + if smContext.UeLocation != body.JsonData.UeLocation { + // All rating group related to this Pdu session should send charging request + for _, dataPath := range tunnel.DataPathPool { + if dataPath.Activated { + for curDataPathNode := dataPath.FirstDPNode; curDataPathNode != nil; curDataPathNode = curDataPathNode.Next() { + if curDataPathNode.IsANUPF() { + urrList = append(urrList, curDataPathNode.UpLinkTunnel.PDR.URR...) 
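The loop above gathers the uplink URRs of the AN-UPF on each activated data path before querying them on a UE-location-change charging event. A possible helper capturing the same traversal, assuming the tunnel type is smf_context.UPTunnel as used elsewhere in the SMF context (sketch only, not part of this patch):

package producer // illustrative placement only

import smf_context "github.com/free5gc/smf/internal/context"

// anchorUplinkURRs is a hypothetical refactoring of the loop above: it collects the uplink
// URRs of the AN-UPF on every activated data path.
func anchorUplinkURRs(tunnel *smf_context.UPTunnel) []*smf_context.URR {
	var urrs []*smf_context.URR
	for _, dataPath := range tunnel.DataPathPool {
		if !dataPath.Activated {
			continue
		}
		for node := dataPath.FirstDPNode; node != nil; node = node.Next() {
			if node.IsANUPF() {
				urrs = append(urrs, node.UpLinkTunnel.PDR.URR...)
			}
		}
	}
	return urrs
}
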
+ QueryReport(smContext, curDataPathNode.UPF, urrList, models.TriggerType_USER_LOCATION_CHANGE) + } + } + } + } + + ReportUsageAndUpdateQuota(smContext) + } + smContext.UeLocation = body.JsonData.UeLocation // Set FAR and An, N3 Release Info @@ -783,6 +802,8 @@ func HandlePDUSessionSMContextUpdate(smContextRef string, body models.UpdateSmCo } case smf_context.SessionReleaseSuccess: + ReleaseChargingSession(smContext) + smContext.Log.Traceln("In case SessionReleaseSuccess") smContext.SetState(smf_context.InActivePending) httpResponse = &httpwrapper.Response{ @@ -908,6 +929,8 @@ func HandlePDUSessionSMContextRelease(smContextRef string, body models.ReleaseSm switch pfcpResponseStatus { case smf_context.SessionReleaseSuccess: + ReleaseChargingSession(smContext) + smContext.Log.Traceln("In case SessionReleaseSuccess") smContext.SetState(smf_context.InActive) httpResponse = &httpwrapper.Response{ @@ -1004,6 +1027,8 @@ func HandlePDUSessionSMContextLocalRelease(smContext *smf_context.SMContext, cre switch pfcpResponseStatus { case smf_context.SessionReleaseSuccess: + ReleaseChargingSession(smContext) + logger.CtxLog.Traceln("In case SessionReleaseSuccess") smContext.SetState(smf_context.InActivePending) if createData.SmContextStatusUri != smContext.SmStatusNotifyUri { diff --git a/internal/sbi/producer/ulcl_procedure.go b/internal/sbi/producer/ulcl_procedure.go index 71883e52..7a2e0aed 100644 --- a/internal/sbi/producer/ulcl_procedure.go +++ b/internal/sbi/producer/ulcl_procedure.go @@ -4,6 +4,7 @@ import ( "net" "reflect" + "github.com/free5gc/openapi/models" "github.com/free5gc/pfcp/pfcpType" "github.com/free5gc/pfcp/pfcpUdp" "github.com/free5gc/smf/internal/context" @@ -77,6 +78,19 @@ func EstablishPSA2(smContext *context.SMContext) { qerList := upLinkPDR.QER urrList := upLinkPDR.URR + chgUrrList := []*context.URR{} + for _, urr := range urrList { + if urr.ReportingTrigger.Start { + chgUrrList = append(chgUrrList, urr) + } + } + + // According to 32.255 5.2.2.7, Addition of additional PDU Session Anchor is a charging event + UpdateChargingSession(smContext, chgUrrList, models.Trigger{ + TriggerType: models.TriggerType_ADDITION_OF_UPF, + TriggerCategory: models.TriggerCategory_IMMEDIATE_REPORT, + }) + lastNode := node.Prev() if lastNode != nil && !reflect.DeepEqual(lastNode.UPF.NodeID, ulcl.NodeID) { @@ -100,7 +114,7 @@ func EstablishPSA2(smContext *context.SMContext) { if !exist || sessionContext.RemoteSEID == 0 { go establishPfcpSession(smContext, pfcpState, resChan) } else { - go modifyExistingPfcpSession(smContext, pfcpState, resChan) + go modifyExistingPfcpSession(smContext, pfcpState, resChan, "") } } else { if reflect.DeepEqual(node.UPF.NodeID, ulcl.NodeID) { @@ -170,7 +184,7 @@ func EstablishULCL(smContext *context.SMContext) { curDPNodeIP := ulcl.NodeID.ResolveNodeIdToIp().String() pendingUPFs = append(pendingUPFs, curDPNodeIP) - go modifyExistingPfcpSession(smContext, pfcpState, resChan) + go modifyExistingPfcpSession(smContext, pfcpState, resChan, "") break } } @@ -215,7 +229,7 @@ func UpdatePSA2DownLink(smContext *context.SMContext) { curDPNodeIP := node.UPF.NodeID.ResolveNodeIdToIp().String() pendingUPFs = append(pendingUPFs, curDPNodeIP) - go modifyExistingPfcpSession(smContext, pfcpState, resChan) + go modifyExistingPfcpSession(smContext, pfcpState, resChan, "") logger.PfcpLog.Info("[SMF] Update PSA2 downlink msg has been send") break } @@ -327,7 +341,7 @@ func UpdateRANAndIUPFUpLink(smContext *context.SMContext) { curDPNodeIP := curDPNode.UPF.NodeID.ResolveNodeIdToIp().String() 
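EstablishPSA2 above reports only URRs whose reporting trigger includes "Start of traffic" when an additional PSA UPF is added (TS 32.255 5.2.2.7). The same filter expressed as a standalone sketch (helper name hypothetical, not part of this patch):

package producer // illustrative placement only

import "github.com/free5gc/smf/internal/context"

// startTriggeredURRs is a hypothetical helper expressing the filter used in EstablishPSA2:
// only URRs with the "Start of traffic" reporting trigger are reported to the CHF.
func startTriggeredURRs(urrs []*context.URR) []*context.URR {
	chgUrrList := []*context.URR{}
	for _, urr := range urrs {
		if urr.ReportingTrigger.Start {
			chgUrrList = append(chgUrrList, urr)
		}
	}
	return chgUrrList
}
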
pendingUPFs = append(pendingUPFs, curDPNodeIP) - go modifyExistingPfcpSession(smContext, pfcpState, resChan) + go modifyExistingPfcpSession(smContext, pfcpState, resChan, "") } } diff --git a/pkg/factory/config.go b/pkg/factory/config.go index 8d4a23cd..2303b004 100644 --- a/pkg/factory/config.go +++ b/pkg/factory/config.go @@ -90,6 +90,7 @@ type Configuration struct { T3591 *TimerValue `yaml:"t3591" valid:"required"` T3592 *TimerValue `yaml:"t3592" valid:"required"` NwInstFqdnEncoding bool `yaml:"nwInstFqdnEncoding" valid:"type(bool),optional"` + RequestedUnit int32 `yaml:"requestedUnit,omitempty" valid:"optional"` } type Logger struct {
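The new requestedUnit option has no explicit default in this hunk. A minimal sketch of how a caller might fall back when it is omitted; the fallback value and helper name are assumptions, not taken from the SMF source:

package factory // illustrative placement only

// requestedUnitOrDefault is a hypothetical helper: it substitutes an assumed placeholder
// default when requestedUnit is absent (zero) in the parsed configuration.
func requestedUnitOrDefault(cfg *Configuration) int32 {
	const defaultRequestedUnit = 1000 // assumed placeholder default
	if cfg == nil || cfg.RequestedUnit == 0 {
		return defaultRequestedUnit
	}
	return cfg.RequestedUnit
}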