diff --git a/Dynamic/Identification/PidIdentifier.cs b/Dynamic/Identification/PidIdentifier.cs
index 0855ccb..2e8ec53 100644
--- a/Dynamic/Identification/PidIdentifier.cs
+++ b/Dynamic/Identification/PidIdentifier.cs
@@ -65,6 +65,8 @@ private bool IsFirstModelBetterThanSecondModel(PidParameters firstModel, PidPara
public PidParameters Identify(ref UnitDataSet dataSet)
{
const bool doOnlyWithDelay = false;//should be false unless debugging something
+ const bool DoFiltering = true; // default is true (this significantly improves the estimates)
+ const bool returnFilterParameters = false; // even if filtering helps improve the estimates, the filter parameters should perhaps not be returned
// 1. try identification with delay of one sample but without filtering
(PidParameters results_withDelay, double[,] U_withDelay) = IdentifyInternal(dataSet, true);
@@ -75,7 +77,7 @@ public PidParameters Identify(ref UnitDataSet dataSet)
return results_withDelay;
}
- // 2. try identification wihtout delay of one sample (yields better results often if dataset is downsampled)
+ // 2. try identification without delay of one sample (yields better results often if dataset is downsampled)
// relative to the clock that the pid algorithm ran on originally
(PidParameters results_withoutDelay, double[,] U_withoutDelay) = IdentifyInternal(dataSet, false);
@@ -83,7 +85,6 @@ public PidParameters Identify(ref UnitDataSet dataSet)
bool doDelay = true;
PidParameters bestPidParameters= results_withoutDelay;
double[,] bestU = U_withoutDelay;
- // if (results_withDelay.Fitting.ObjFunValAbs < results_withoutDelay.Fitting.ObjFunValAbs)
if (IsFirstModelBetterThanSecondModel(results_withDelay, results_withoutDelay))
{
doDelay = true;
@@ -98,40 +99,48 @@ public PidParameters Identify(ref UnitDataSet dataSet)
bestU = U_withoutDelay;
}
- // 3. try filtering y_meas and see if this improves fit
- // if there is noise on y_meas that is not filtered, this may cause too small Kp/Ti
- double maxFilterTime_s = 6 * timeBase_s;
- for (double filterTime_s = timeBase_s; filterTime_s < maxFilterTime_s; filterTime_s += timeBase_s)
- {
- var pidFilterParams = new PidFilterParams(true, 1, filterTime_s);
- (PidParameters results_withFilter, double[,] U_withFilter) = IdentifyInternal(dataSet, doDelay, pidFilterParams);
-
- if (IsFirstModelBetterThanSecondModel(results_withFilter,bestPidParameters))
+ if (DoFiltering)
+ {
+ // 3. try filtering y_meas and see if this improves fit
+ // if there is noise on y_meas that is not filtered, this may cause too small Kp/Ti
+ double maxFilterTime_s = 6 * timeBase_s;
+ for (double filterTime_s = timeBase_s; filterTime_s < maxFilterTime_s; filterTime_s += timeBase_s)
{
- bestU = U_withFilter;
- bestPidParameters = results_withFilter;
+ var pidFilterParams = new PidFilterParams(true, 1, filterTime_s);
+ (PidParameters results_withFilter, double[,] U_withFilter) = IdentifyInternal(dataSet, doDelay, pidFilterParams);
+
+ if (IsFirstModelBetterThanSecondModel(results_withFilter, bestPidParameters))
+ {
+ bestU = U_withFilter;
+ bestPidParameters = results_withFilter;
+ }
}
- }
- // 4. try filtering the input u_meas
- // this is experimental, but if downsampled, Kp/Ti seems often too low, and hypothesis is that this is because
- // small variations in u_meas/y_meas are no longer tightly correlated, so identification should perhaps focus on fitting
- // to only "larger" changes.
- bool filterUmeas = true;
- for (double filterTime_s = timeBase_s; filterTime_s < maxFilterTime_s; filterTime_s += timeBase_s)
- {
- var pidFilterParams = new PidFilterParams(true, 1, filterTime_s);
- (PidParameters results_withFilter, double[,] U_withFilter) =
- IdentifyInternal(dataSet, doDelay, pidFilterParams,filterUmeas);
-
- if (IsFirstModelBetterThanSecondModel(results_withFilter, bestPidParameters))
+ // 4. try filtering the input u_meas
+ // this is experimental, but if downsampled, Kp/Ti seems often too low, and hypothesis is that this is because
+ // small variations in u_meas/y_meas are no longer tightly correlated, so identification should perhaps focus on fitting
+ // to only "larger" changes.
+ bool filterUmeas = true;
+ for (double filterTime_s = timeBase_s; filterTime_s < maxFilterTime_s; filterTime_s += timeBase_s)
{
- bestU = U_withFilter;
- bestPidParameters = results_withFilter;
+ var pidFilterParams = new PidFilterParams(true, 1, filterTime_s);
+ (PidParameters results_withFilter, double[,] U_withFilter) =
+ IdentifyInternal(dataSet, doDelay, pidFilterParams, filterUmeas);
+
+ if (IsFirstModelBetterThanSecondModel(results_withFilter, bestPidParameters))
+ {
+ bestU = U_withFilter;
+ bestPidParameters = results_withFilter;
+ }
}
}
// 5. finally return the "best" result
dataSet.U_sim = bestU;
+ // consider whether the filter parameters need to be returned at all
+ if (!returnFilterParameters)
+ {
+ bestPidParameters.Filtering = new PidFilterParams();
+ }
return bestPidParameters;
}
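
// Note (illustrative only, not part of the patch): the filtering swept over in steps 3 and 4 is applied
// inside IdentifyInternal via PidFilterParams; the helper below is just a sketch of the kind of
// first-order low-pass that a given filterTime_s corresponds to. The names (yFiltPrev, yMeas) are
// hypothetical and chosen for illustration.

static double FirstOrderLowPassStep(double yFiltPrev, double yMeas, double filterTime_s, double timeBase_s)
{
    // backward-Euler discretization of a first-order filter with time constant filterTime_s;
    // a lies in (0,1]: a small filterTime_s gives a close to 1 (little smoothing), a large filterTime_s gives heavy smoothing
    double a = timeBase_s / (filterTime_s + timeBase_s);
    return (1 - a) * yFiltPrev + a * yMeas;
}

// In the loops above, filterTime_s is stepped from timeBase_s up to (but not including) 6 * timeBase_s,
// and a filtered identification is kept only if IsFirstModelBetterThanSecondModel prefers it over the current best.
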
diff --git a/TimeSeriesAnalysis.Tests/Tests/PidIdentUnitTests.cs b/TimeSeriesAnalysis.Tests/Tests/PidIdentUnitTests.cs
index ee2f8fc..935a8b2 100644
--- a/TimeSeriesAnalysis.Tests/Tests/PidIdentUnitTests.cs
+++ b/TimeSeriesAnalysis.Tests/Tests/PidIdentUnitTests.cs
@@ -73,10 +73,10 @@ public void SetpointStep_WNoise_KpAndTiEstimatedOk(double ySetAmplitude, double
pidDataSet.GetTimeBase(), caseId);
//Shared.DisablePlots();
- Assert.IsTrue(Math.Abs(pidParameters1.Kp - idResult.Kp)< pidParameters1.Kp * tolerancePrc / 100, "Kp too far off!:"+ idResult.Kp);
+ Assert.IsTrue(Math.Abs(pidParameters1.Kp - idResult.Kp) < pidParameters1.Kp * tolerancePrc / 100, "Estimated Kp:" + idResult.Kp + " True Kp:" + pidParameters1.Kp);
if (pidParameters1.Ti_s > 0)
{
- Assert.IsTrue(Math.Abs(pidParameters1.Ti_s - idResult.Ti_s) < pidParameters1.Ti_s * tolerancePrc / 100, "Ti_s too far off!:"+ idResult.Ti_s);
+ Assert.IsTrue(Math.Abs(pidParameters1.Ti_s - idResult.Ti_s) < pidParameters1.Ti_s * tolerancePrc / 100, "Estimated Ti_s:" + idResult.Ti_s + " True Ti_s:" + pidParameters1.Ti_s);
}
else
{
@@ -235,7 +235,7 @@ public void Pcontroller_estimateU0(double u0)
// when noise is added in the fully sampled, case the solver uses a low-pass filtering of ymeas as a key
// tactic to improve estimates of Kp and Ti. In the downsampled case,it is not possible to use filtering in the same
// way. It may be that instead the solver should run the pid-controller at its original time sampling,
- // maybe this will casue the noie to smoothe out
+ // maybe this will cause the noise to smooth out
[TestCase(2,0,10)]
[TestCase(2,0.05,35)]// this is poor
@@ -279,10 +279,10 @@ public void DistStep_WNoise_Downsampled_KpAndTiEstimatedOk(int downsampleFactor,
// Shared.DisablePlots();
// asserts
- Assert.IsTrue(Math.Abs(pidParameters1.Kp - idResult.Kp) < pidParameters1.Kp * tolerancePrc / 100, "Kp too far off!:"+ idResult.Kp);
+ Assert.IsTrue(Math.Abs(pidParameters1.Kp - idResult.Kp) < pidParameters1.Kp * tolerancePrc / 100, "Kp estimate:" + idResult.Kp + " versus true:" + pidParameters1.Kp);
if (pidParameters1.Ti_s > 0)
{
- Assert.IsTrue(Math.Abs(pidParameters1.Ti_s - idResult.Ti_s) < pidParameters1.Ti_s * tolerancePrc / 100, "Ti_s too far off!"+ idResult.Ti_s);
+ Assert.IsTrue(Math.Abs(pidParameters1.Ti_s - idResult.Ti_s) < pidParameters1.Ti_s * tolerancePrc / 100, "Ti_s estimate:" + idResult.Ti_s + " versus true:" + pidParameters1.Ti_s);
}
else
{
diff --git a/TimeSeriesAnalysis.csproj b/TimeSeriesAnalysis.csproj
index 174f909..fa5fde8 100644
--- a/TimeSeriesAnalysis.csproj
+++ b/TimeSeriesAnalysis.csproj
@@ -14,7 +14,7 @@
False
https://github.com/equinor/TimeSeriesAnalysis.git
readme.md
- 1.3.40
+ 1.3.41
Equinor
Equinor
true