From 89d78eaab0356b943b62b8f3ac872446eaad1d0a Mon Sep 17 00:00:00 2001
From: JosephPilov-MSFT <23519517+PiJoCoder@users.noreply.github.com>
Date: Mon, 27 Apr 2026 14:43:05 -0500
Subject: [PATCH 01/12] #513 Add Trace Event Importer (Managed) plugin to
replace ReadTrace.exe dependency in the future
- Add `TraceEventImporter` project: managed importer plugin reading `.trc`/`.xel` files via XELite, normalizing SQL text, computing hash IDs, and bulk-loading into the `ReadTrace` schema
- Embed `CreateSchema.sql` and `PostLoadFixups.sql` as assembly resources; includes `vwBatchPartialAggsByGroupTimeInterval` view matching ReadTrace.exe output format
- Use ReadTrace.exe index naming convention (no `IX_` prefix) for compatibility with `ReadTracePostProcessing.sql` index hints
- Add `ProjectReference` in `sqlnexus.csproj`, register importer in `AppConfig.xml`
- Set `AppendTargetFrameworkToOutputPath=false` so output goes to `bin\Release\` directly
- Implement mutual exclusivity between ReadTrace and TraceEventImporter: auto-disable ReadTrace at startup when both enabled, MessageBox notification on manual toggle
- Restrict `SupportedMasks` to `*pssdiag*.xel` and `*LogScout*.xel` to avoid conflicts with CustomXELImporter
- Declare `ReadTracePostProcessing.sql` as post-script; update `RunPostScripts` to allow it for both importers
- Fix `BulkLoadRowset.GetNewRow()` to handle schema-qualified table names (`[ReadTrace].[tblName]` instead of `[ReadTrace.tblName]`)
- Fix `ImportOptions` saved options overwrite bug: add `HasOption()` method, only restore saved values when they actually exist
- Skip non-boolean options (e.g., `int`) from checkbox menu to prevent `InvalidCastException`
- Add `EnforceTraceImporterExclusivity()` and `IsImporterEnabled()` helpers in `fmImport.cs`
---
BulkLoadEx/BulkLoadRowset.cs | 15 +-
TraceEventImporter/Database/BulkWriter.cs | 352 +++++++++++
TraceEventImporter/Models/TraceEvent.cs | 139 +++++
.../Normalization/HashComputer.cs | 71 +++
.../Normalization/SqlTextNormalizer.cs | 301 ++++++++++
TraceEventImporter/Processing/Aggregator.cs | 267 +++++++++
.../Processing/EventProcessor.cs | 546 ++++++++++++++++++
.../Processing/SpecialProcDetector.cs | 64 ++
TraceEventImporter/Processing/UniqueStore.cs | 162 ++++++
.../Readers/ITraceEventReader.cs | 24 +
TraceEventImporter/Readers/TrcFileReader.cs | 470 +++++++++++++++
TraceEventImporter/Readers/XelFileReader.cs | 270 +++++++++
TraceEventImporter/Schema/CreateSchema.sql | 425 ++++++++++++++
TraceEventImporter/Schema/PostLoadFixups.sql | 92 +++
TraceEventImporter/TraceEventImporter.csproj | 30 +
.../TraceEventImporterPlugin.cs | 386 +++++++++++++
sqlnexus.sln | 52 ++
sqlnexus/AppConfig.xml | 2 +
sqlnexus/Utilities.cs | 4 +
sqlnexus/fmImport.cs | 121 +++-
sqlnexus/sqlnexus.csproj | 8 +-
21 files changed, 3785 insertions(+), 16 deletions(-)
create mode 100644 TraceEventImporter/Database/BulkWriter.cs
create mode 100644 TraceEventImporter/Models/TraceEvent.cs
create mode 100644 TraceEventImporter/Normalization/HashComputer.cs
create mode 100644 TraceEventImporter/Normalization/SqlTextNormalizer.cs
create mode 100644 TraceEventImporter/Processing/Aggregator.cs
create mode 100644 TraceEventImporter/Processing/EventProcessor.cs
create mode 100644 TraceEventImporter/Processing/SpecialProcDetector.cs
create mode 100644 TraceEventImporter/Processing/UniqueStore.cs
create mode 100644 TraceEventImporter/Readers/ITraceEventReader.cs
create mode 100644 TraceEventImporter/Readers/TrcFileReader.cs
create mode 100644 TraceEventImporter/Readers/XelFileReader.cs
create mode 100644 TraceEventImporter/Schema/CreateSchema.sql
create mode 100644 TraceEventImporter/Schema/PostLoadFixups.sql
create mode 100644 TraceEventImporter/TraceEventImporter.csproj
create mode 100644 TraceEventImporter/TraceEventImporterPlugin.cs
diff --git a/BulkLoadEx/BulkLoadRowset.cs b/BulkLoadEx/BulkLoadRowset.cs
index 32ddb3ec..9d72dae7 100644
--- a/BulkLoadEx/BulkLoadRowset.cs
+++ b/BulkLoadEx/BulkLoadRowset.cs
@@ -78,7 +78,20 @@ public DataRow GetNewRow()
if (null == dataTableBuffer)
{
// Get a DataTable w/no rows that captures the schema of the destination table
- SqlDataAdapter schemaAdapter = new SqlDataAdapter("select * from [" + targetTable + "] where 1=0", cn);
+ // Handle schema-qualified names (e.g. "ReadTrace.tblTraceFiles") by bracketing each part separately
+ string quotedName;
+ int dotIndex = targetTable.IndexOf('.');
+ if (dotIndex >= 0)
+ {
+ string schema = targetTable.Substring(0, dotIndex);
+ string table = targetTable.Substring(dotIndex + 1);
+ quotedName = "[" + schema + "].[" + table + "]";
+ }
+ else
+ {
+ quotedName = "[" + targetTable + "]";
+ }
+ SqlDataAdapter schemaAdapter = new SqlDataAdapter("select * from " + quotedName + " where 1=0", cn);
dataTableBuffer = new DataTable();
dataTableBuffer.TableName = targetTable;
schemaAdapter.Fill(this.dataTableBuffer);
diff --git a/TraceEventImporter/Database/BulkWriter.cs b/TraceEventImporter/Database/BulkWriter.cs
new file mode 100644
index 00000000..b8f9aac6
--- /dev/null
+++ b/TraceEventImporter/Database/BulkWriter.cs
@@ -0,0 +1,352 @@
+using System;
+using System.Collections.Generic;
+using System.Data;
+using BulkLoadEx;
+using TraceEventImporter.Processing;
+
+namespace TraceEventImporter.Database
+{
+ /// <summary>
+ /// Writes processed trace data to SQL Server using BulkLoadRowset from BulkLoadEx.
+ /// Tables must already exist (created via CreateSchema.sql) before calling these methods.
+ /// </summary>
+ public class BulkWriter : IDisposable
+ {
+ private readonly string _connStr;
+ private bool _disposed;
+
+ public long TotalRowsInserted { get; private set; }
+
+ public BulkWriter(string connectionString)
+ {
+ _connStr = connectionString;
+ }
+
+ public void WriteMiscInfo(string attribute, string value)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblMiscInfo", _connStr);
+ DataRow row = bl.GetNewRow();
+ row["Attribute"] = attribute;
+ row["Value"] = (object)value ?? DBNull.Value;
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ bl.Close();
+ }
+
+ public void WriteTraceFile(long firstSeq, long lastSeq, DateTime? firstTime, DateTime? lastTime, long eventsRead, string fileName)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblTraceFiles", _connStr);
+ DataRow row = bl.GetNewRow();
+ row["FirstSeqNumber"] = firstSeq;
+ row["LastSeqNumber"] = lastSeq;
+ row["FirstEventTime"] = (object)firstTime ?? DBNull.Value;
+ row["LastEventTime"] = (object)lastTime ?? DBNull.Value;
+ row["EventsRead"] = eventsRead;
+ row["TraceFileName"] = fileName;
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ bl.Close();
+ }
+
+ public void WriteTracedEvents(IEnumerable<int> eventIds)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblTracedEvents", _connStr);
+ foreach (int id in eventIds)
+ {
+ DataRow row = bl.GetNewRow();
+ row["EventID"] = (short)id;
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ }
+ bl.Close();
+ }
+
+ public void WriteUniqueAppNames(IEnumerable<KeyValuePair<string, int>> appNames)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblUniqueAppNames", _connStr);
+ foreach (var kvp in appNames)
+ {
+ DataRow row = bl.GetNewRow();
+ // iID is identity — don't set it; SQL Server generates it
+ row["AppName"] = kvp.Key ?? "";
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ }
+ bl.Close();
+ }
+
+ public void WriteUniqueLoginNames(IEnumerable<KeyValuePair<string, int>> loginNames)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblUniqueLoginNames", _connStr);
+ foreach (var kvp in loginNames)
+ {
+ DataRow row = bl.GetNewRow();
+ row["LoginName"] = kvp.Key ?? "";
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ }
+ bl.Close();
+ }
+
+ public void WriteProcedureNames(IEnumerable procs)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblProcedureNames", _connStr);
+ foreach (var proc in procs)
+ {
+ DataRow row = bl.GetNewRow();
+ row["DBID"] = (object)proc.DBID ?? DBNull.Value;
+ row["ObjectID"] = (object)proc.ObjectID ?? DBNull.Value;
+ row["SpecialProcID"] = (object)proc.SpecialProcID ?? DBNull.Value;
+ row["Name"] = proc.Name ?? "";
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ }
+ bl.Close();
+ }
+
+ public void WriteUniqueBatches(IEnumerable batches)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblUniqueBatches", _connStr);
+ foreach (var b in batches)
+ {
+ DataRow row = bl.GetNewRow();
+ row["Seq"] = b.Seq;
+ row["HashID"] = b.HashID;
+ row["OrigText"] = b.OrigText ?? "";
+ row["NormText"] = b.NormText ?? "";
+ row["SpecialProcID"] = (object)b.SpecialProcID ?? DBNull.Value;
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ }
+ bl.Close();
+ }
+
+ public void WriteUniqueStatements(IEnumerable stmts)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblUniqueStatements", _connStr);
+ foreach (var s in stmts)
+ {
+ DataRow row = bl.GetNewRow();
+ row["Seq"] = s.Seq;
+ row["HashID"] = s.HashID;
+ row["OrigText"] = (object)s.OrigText ?? DBNull.Value;
+ row["NormText"] = (object)s.NormText ?? DBNull.Value;
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ }
+ bl.Close();
+ }
+
+ public void WriteBatches(List batches)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblBatches", _connStr);
+ foreach (var b in batches)
+ {
+ DataRow row = bl.GetNewRow();
+ row["BatchSeq"] = b.BatchSeq;
+ row["HashID"] = b.HashID;
+ row["Session"] = b.Session;
+ row["Request"] = b.Request;
+ row["ConnId"] = b.ConnId;
+ row["StartTime"] = (object)b.StartTime ?? DBNull.Value;
+ row["EndTime"] = (object)b.EndTime ?? DBNull.Value;
+ row["Duration"] = (object)b.Duration ?? DBNull.Value;
+ row["Reads"] = (object)b.Reads ?? DBNull.Value;
+ row["Writes"] = (object)b.Writes ?? DBNull.Value;
+ row["CPU"] = (object)b.CPU ?? DBNull.Value;
+ row["fRPCEvent"] = b.fRPCEvent;
+ row["DBID"] = (object)b.DBID ?? DBNull.Value;
+ row["StartSeq"] = (object)b.StartSeq ?? DBNull.Value;
+ row["EndSeq"] = (object)b.EndSeq ?? DBNull.Value;
+ row["AttnSeq"] = (object)b.AttnSeq ?? DBNull.Value;
+ row["ConnSeq"] = (object)b.ConnSeq ?? DBNull.Value;
+ row["TextData"] = (object)b.TextData ?? DBNull.Value;
+ row["OrigRowCount"] = (object)b.OrigRowCount ?? DBNull.Value;
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ }
+ bl.Close();
+ }
+
+ public void WriteStatements(List stmts)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblStatements", _connStr);
+ foreach (var s in stmts)
+ {
+ DataRow row = bl.GetNewRow();
+ row["StmtSeq"] = s.StmtSeq;
+ row["HashID"] = s.HashID;
+ row["Session"] = s.Session;
+ row["Request"] = s.Request;
+ row["ConnId"] = s.ConnId;
+ row["StartTime"] = (object)s.StartTime ?? DBNull.Value;
+ row["EndTime"] = (object)s.EndTime ?? DBNull.Value;
+ row["Duration"] = (object)s.Duration ?? DBNull.Value;
+ row["Reads"] = (object)s.Reads ?? DBNull.Value;
+ row["Writes"] = (object)s.Writes ?? DBNull.Value;
+ row["CPU"] = (object)s.CPU ?? DBNull.Value;
+ row["Rows"] = (object)s.Rows ?? DBNull.Value;
+ row["DBID"] = (object)s.DBID ?? DBNull.Value;
+ row["ObjectID"] = (object)s.ObjectID ?? DBNull.Value;
+ row["NestLevel"] = s.NestLevel.HasValue ? (object)(byte)s.NestLevel.Value : DBNull.Value;
+ row["fDynamicSQL"] = s.fDynamicSQL;
+ row["StartSeq"] = (object)s.StartSeq ?? DBNull.Value;
+ row["EndSeq"] = (object)s.EndSeq ?? DBNull.Value;
+ row["ConnSeq"] = (object)s.ConnSeq ?? DBNull.Value;
+ row["BatchSeq"] = (object)s.BatchSeq ?? DBNull.Value;
+ row["ParentStmtSeq"] = (object)s.ParentStmtSeq ?? DBNull.Value;
+ row["AttnSeq"] = (object)s.AttnSeq ?? DBNull.Value;
+ row["TextData"] = (object)s.TextData ?? DBNull.Value;
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ }
+ bl.Close();
+ }
+
+ public void WriteConnections(List conns)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblConnections", _connStr);
+ foreach (var c in conns)
+ {
+ DataRow row = bl.GetNewRow();
+ row["ConnSeq"] = c.ConnSeq;
+ row["Session"] = c.Session;
+ row["StartTime"] = (object)c.StartTime ?? DBNull.Value;
+ row["EndTime"] = (object)c.EndTime ?? DBNull.Value;
+ row["Duration"] = (object)c.Duration ?? DBNull.Value;
+ row["Reads"] = (object)c.Reads ?? DBNull.Value;
+ row["Writes"] = (object)c.Writes ?? DBNull.Value;
+ row["CPU"] = (object)c.CPU ?? DBNull.Value;
+ row["ApplicationName"] = (object)c.ApplicationName ?? DBNull.Value;
+ row["LoginName"] = (object)c.LoginName ?? DBNull.Value;
+ row["HostName"] = (object)c.HostName ?? DBNull.Value;
+ row["NTDomainName"] = (object)c.NTDomainName ?? DBNull.Value;
+ row["NTUserName"] = (object)c.NTUserName ?? DBNull.Value;
+ row["StartSeq"] = (object)c.StartSeq ?? DBNull.Value;
+ row["EndSeq"] = (object)c.EndSeq ?? DBNull.Value;
+ row["TextData"] = (object)c.TextData ?? DBNull.Value;
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ }
+ bl.Close();
+ }
+
+ public void WriteInterestingEvents(List events)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblInterestingEvents", _connStr);
+ foreach (var e in events)
+ {
+ DataRow row = bl.GetNewRow();
+ row["Seq"] = e.Seq;
+ row["EventID"] = e.EventID;
+ row["Session"] = e.Session;
+ row["Request"] = e.Request;
+ row["ConnId"] = e.ConnId;
+ row["StartTime"] = (object)e.StartTime ?? DBNull.Value;
+ row["EndTime"] = (object)e.EndTime ?? DBNull.Value;
+ row["Duration"] = (object)e.Duration ?? DBNull.Value;
+ row["DBID"] = (object)e.DBID ?? DBNull.Value;
+ row["IntegerData"] = (object)e.IntegerData ?? DBNull.Value;
+ row["EventSubclass"] = (object)e.EventSubclass ?? DBNull.Value;
+ row["TextData"] = (object)e.TextData ?? DBNull.Value;
+ row["ObjectID"] = (object)e.ObjectID ?? DBNull.Value;
+ row["Error"] = (object)e.Error ?? DBNull.Value;
+ row["BatchSeq"] = (object)e.BatchSeq ?? DBNull.Value;
+ row["Severity"] = (object)e.Severity ?? DBNull.Value;
+ row["State"] = (object)e.State ?? DBNull.Value;
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ }
+ bl.Close();
+ }
+
+ public void WriteTimeIntervals(List<TimeIntervalRow> intervals)
+ {
+ // tblTimeIntervals has IDENTITY — use SqlBulkCopy with KEEP_IDENTITY off
+ // We insert without the TimeInterval column; SQL generates it
+ var bl = new BulkLoadRowset("ReadTrace.tblTimeIntervals", _connStr);
+ foreach (var ti in intervals)
+ {
+ DataRow row = bl.GetNewRow();
+ row["StartTime"] = ti.StartTime;
+ row["EndTime"] = ti.EndTime;
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ }
+ bl.Close();
+ }
+
+ public void WriteBatchPartialAggs(List aggs)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblBatchPartialAggs", _connStr);
+ foreach (var a in aggs)
+ {
+ DataRow row = bl.GetNewRow();
+ row["HashID"] = a.HashID;
+ row["TimeInterval"] = a.TimeInterval;
+ row["StartingEvents"] = a.StartingEvents;
+ row["CompletedEvents"] = a.CompletedEvents;
+ row["AttentionEvents"] = a.AttentionEvents;
+ row["MinDuration"] = (object)a.MinDuration ?? DBNull.Value;
+ row["MaxDuration"] = (object)a.MaxDuration ?? DBNull.Value;
+ row["TotalDuration"] = (object)a.TotalDuration ?? DBNull.Value;
+ row["MinReads"] = (object)a.MinReads ?? DBNull.Value;
+ row["MaxReads"] = (object)a.MaxReads ?? DBNull.Value;
+ row["TotalReads"] = (object)a.TotalReads ?? DBNull.Value;
+ row["MinWrites"] = (object)a.MinWrites ?? DBNull.Value;
+ row["MaxWrites"] = (object)a.MaxWrites ?? DBNull.Value;
+ row["TotalWrites"] = (object)a.TotalWrites ?? DBNull.Value;
+ row["MinCPU"] = (object)a.MinCPU ?? DBNull.Value;
+ row["MaxCPU"] = (object)a.MaxCPU ?? DBNull.Value;
+ row["TotalCPU"] = (object)a.TotalCPU ?? DBNull.Value;
+ row["AppNameID"] = a.AppNameID;
+ row["LoginNameID"] = a.LoginNameID;
+ row["DBID"] = a.DBID;
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ }
+ bl.Close();
+ }
+
+ public void WriteStmtPartialAggs(List aggs)
+ {
+ var bl = new BulkLoadRowset("ReadTrace.tblStmtPartialAggs", _connStr);
+ foreach (var a in aggs)
+ {
+ DataRow row = bl.GetNewRow();
+ row["HashID"] = a.HashID;
+ row["TimeInterval"] = a.TimeInterval;
+ row["ObjectID"] = (object)a.ObjectID ?? DBNull.Value;
+ row["DBID"] = (object)a.DBID ?? DBNull.Value;
+ row["AppNameID"] = a.AppNameID;
+ row["LoginNameID"] = a.LoginNameID;
+ row["StartingEvents"] = a.StartingEvents;
+ row["CompletedEvents"] = a.CompletedEvents;
+ row["AttentionEvents"] = a.AttentionEvents;
+ row["MinDuration"] = (object)a.MinDuration ?? DBNull.Value;
+ row["MaxDuration"] = (object)a.MaxDuration ?? DBNull.Value;
+ row["TotalDuration"] = (object)a.TotalDuration ?? DBNull.Value;
+ row["MinReads"] = (object)a.MinReads ?? DBNull.Value;
+ row["MaxReads"] = (object)a.MaxReads ?? DBNull.Value;
+ row["TotalReads"] = (object)a.TotalReads ?? DBNull.Value;
+ row["MinWrites"] = (object)a.MinWrites ?? DBNull.Value;
+ row["MaxWrites"] = (object)a.MaxWrites ?? DBNull.Value;
+ row["TotalWrites"] = (object)a.TotalWrites ?? DBNull.Value;
+ row["MinCPU"] = (object)a.MinCPU ?? DBNull.Value;
+ row["MaxCPU"] = (object)a.MaxCPU ?? DBNull.Value;
+ row["TotalCPU"] = (object)a.TotalCPU ?? DBNull.Value;
+ bl.InsertRow(row);
+ TotalRowsInserted++;
+ }
+ bl.Close();
+ }
+
+ public void Dispose()
+ {
+ if (!_disposed)
+ {
+ _disposed = true;
+ }
+ }
+ }
+}
diff --git a/TraceEventImporter/Models/TraceEvent.cs b/TraceEventImporter/Models/TraceEvent.cs
new file mode 100644
index 00000000..62ee4dc9
--- /dev/null
+++ b/TraceEventImporter/Models/TraceEvent.cs
@@ -0,0 +1,139 @@
+using System;
+
+namespace TraceEventImporter.Models
+{
+ public enum TraceEventType
+ {
+ Unknown = 0,
+ RpcCompleted = 10,
+ RpcStarting = 11,
+ SqlBatchCompleted = 12,
+ SqlBatchStarting = 13,
+ AuditLogin = 14,
+ AuditLogout = 15,
+ Attention = 16,
+ ExistingConnection = 17,
+ DtcTransaction = 19,
+ StmtStarting = 40,
+ StmtCompleted = 41,
+ SpStarting = 42,
+ SpCompleted = 43,
+ SpStmtStarting = 44,
+ SpStmtCompleted = 45,
+ ShowplanAll = 97,
+ ShowplanStatisticsProfile = 146,
+ StmtRecompile = 166,
+ }
+
+ public class TraceEvent
+ {
+ public long Seq { get; set; }
+ public TraceEventType EventType { get; set; }
+ public int EventId { get; set; }
+ public int SessionId { get; set; }
+ public int RequestId { get; set; }
+ public long ConnId { get; set; }
+ public int DatabaseId { get; set; }
+ public DateTime? StartTime { get; set; }
+ public DateTime? EndTime { get; set; }
+ public long? Duration { get; set; }
+ public long? CPU { get; set; }
+ public long? Reads { get; set; }
+ public long? Writes { get; set; }
+ public long? RowCount { get; set; }
+ public string TextData { get; set; }
+ public int? ObjectId { get; set; }
+ public string ObjectName { get; set; }
+ public int? NestLevel { get; set; }
+ public string ApplicationName { get; set; }
+ public string LoginName { get; set; }
+ public string HostName { get; set; }
+ public string NTDomainName { get; set; }
+ public string NTUserName { get; set; }
+ public int? Error { get; set; }
+ public int? EventSubclass { get; set; }
+ public int? IntegerData { get; set; }
+ public int? Severity { get; set; }
+ public int? State { get; set; }
+ public int? Offset { get; set; }
+ public int? LineNumber { get; set; }
+
+ public bool IsStartingEvent
+ {
+ get
+ {
+ switch (EventType)
+ {
+ case TraceEventType.SqlBatchStarting:
+ case TraceEventType.RpcStarting:
+ case TraceEventType.SpStmtStarting:
+ case TraceEventType.StmtStarting:
+ case TraceEventType.SpStarting:
+ return true;
+ default:
+ return false;
+ }
+ }
+ }
+
+ public bool IsCompletedEvent
+ {
+ get
+ {
+ switch (EventType)
+ {
+ case TraceEventType.SqlBatchCompleted:
+ case TraceEventType.RpcCompleted:
+ case TraceEventType.SpStmtCompleted:
+ case TraceEventType.StmtCompleted:
+ case TraceEventType.SpCompleted:
+ return true;
+ default:
+ return false;
+ }
+ }
+ }
+
+ public bool IsBatchEvent
+ {
+ get
+ {
+ switch (EventType)
+ {
+ case TraceEventType.SqlBatchStarting:
+ case TraceEventType.SqlBatchCompleted:
+ case TraceEventType.RpcStarting:
+ case TraceEventType.RpcCompleted:
+ return true;
+ default:
+ return false;
+ }
+ }
+ }
+
+ public bool IsStatementEvent
+ {
+ get
+ {
+ switch (EventType)
+ {
+ case TraceEventType.SpStmtStarting:
+ case TraceEventType.SpStmtCompleted:
+ case TraceEventType.StmtStarting:
+ case TraceEventType.StmtCompleted:
+ return true;
+ default:
+ return false;
+ }
+ }
+ }
+
+ public bool IsRpcEvent
+ {
+ get
+ {
+ return EventType == TraceEventType.RpcStarting || EventType == TraceEventType.RpcCompleted;
+ }
+ }
+ }
+}
diff --git a/TraceEventImporter/Normalization/HashComputer.cs b/TraceEventImporter/Normalization/HashComputer.cs
new file mode 100644
index 00000000..92146c45
--- /dev/null
+++ b/TraceEventImporter/Normalization/HashComputer.cs
@@ -0,0 +1,71 @@
+namespace TraceEventImporter.Normalization
+{
+ /// <summary>
+ /// Exact port of StringHash() from READ80TRACE/Analyze.cpp.
+ /// Produces a 64-bit hash from normalized SQL text using dual-hash
+ /// (One-At-A-Time + Bernstein's) with periodic DWORD swaps.
+ /// </summary>
+ public static class HashComputer
+ {
+ /// <summary>
+ /// Compute a 64-bit hash of the normalized text.
+ /// Must produce identical results to the C++ StringHash() function.
+ /// </summary>
+ /// <param name="normalizedText">Uppercased, whitespace-collapsed, placeholder-substituted SQL text</param>
+ /// <param name="specialProcId">Special procedure ID (0 for regular batches, nonzero for sp_executesql etc.)</param>
+ /// <returns>64-bit hash as a signed long (matching SQL bigint storage)</returns>
+ public static long ComputeHash(string normalizedText, int specialProcId = 0)
+ {
+ if (string.IsNullOrEmpty(normalizedText))
+ return 0;
+
+ int charCount = normalizedText.Length;
+
+ // Seed: dwHighHash = charCount, dwLowHash = specialProcId
+ // In the C++ struct, dwHighHash is at offset 0 (low 32 bits of uint64),
+ // dwLowHash is at offset 4 (high 32 bits of uint64) on little-endian.
+ uint dwHighHash = (uint)charCount;
+ uint dwLowHash = (uint)specialProcId;
+
+ for (int i = 0; i < charCount; i++)
+ {
+ ushort c = normalizedText[i];
+
+ // One At A Time hash on dwLowHash
+ dwLowHash += c;
+ dwLowHash += (dwLowHash << 10);
+ dwLowHash ^= (dwLowHash >> 6);
+
+ // Bernstein's hash on dwHighHash: ((self << 5) + self) == *33
+ dwHighHash = ((dwHighHash << 5) + dwHighHash) + c;
+
+ // Swap DWORD halves to avoid funnel patterns.
+ // Check next char for '!' (33) or current position divisible by 32.
+ // When i is the last char, "next char" is the null terminator (0) in C++,
+ // which is != 33, so only the modulo check applies.
+ bool shouldSwap = (i % 32 == 0);
+ if (!shouldSwap && i + 1 < charCount)
+ {
+ shouldSwap = (normalizedText[i + 1] == '!');
+ }
+
+ if (shouldSwap)
+ {
+ uint temp = dwLowHash;
+ dwLowHash = dwHighHash;
+ dwHighHash = temp;
+ }
+ }
+
+ // Final One At A Time finalization on dwLowHash
+ dwLowHash += (dwLowHash << 3);
+ dwLowHash ^= (dwLowHash >> 11);
+ dwLowHash += (dwLowHash << 15);
+
+ // Combine into 64-bit value matching C++ union layout on little-endian:
+ // dwHighHash at offset 0 = low 32 bits, dwLowHash at offset 4 = high 32 bits
+ ulong result = ((ulong)dwLowHash << 32) | dwHighHash;
+ return unchecked((long)result);
+ }
+ }
+}
diff --git a/TraceEventImporter/Normalization/SqlTextNormalizer.cs b/TraceEventImporter/Normalization/SqlTextNormalizer.cs
new file mode 100644
index 00000000..19e4f0dd
--- /dev/null
+++ b/TraceEventImporter/Normalization/SqlTextNormalizer.cs
@@ -0,0 +1,301 @@
+using System;
+using System.Text;
+
+namespace TraceEventImporter.Normalization
+{
+ /// <summary>
+ /// Normalizes SQL text by replacing literal values with placeholders.
+ /// Implements the same normalization rules as NORMALIZEMODE in rml.y:
+ /// @variable -> @P#
+ /// 123 -> {##}
+ /// 1.23 -> {##}.{##}
+ /// 'text' -> {STR}
+ /// N'text' -> {STR}
+ /// 0xABCD -> {BS}
+ /// {GUID-here} -> {GUID}
+ /// Also: UPPER case, collapse whitespace, trim, strip comments.
+ /// </summary>
+ public static class SqlTextNormalizer
+ {
+ public static string Normalize(string sql)
+ {
+ if (string.IsNullOrEmpty(sql))
+ return string.Empty;
+
+ var result = new StringBuilder(sql.Length);
+ int i = 0;
+ int len = sql.Length;
+
+ while (i < len)
+ {
+ char c = sql[i];
+
+ // --- Line comments: -- ... \n ---
+ if (c == '-' && i + 1 < len && sql[i + 1] == '-')
+ {
+ i += 2;
+ while (i < len && sql[i] != '\n' && sql[i] != '\r')
+ i++;
+ continue;
+ }
+
+ // --- Block comments: /* ... */ ---
+ if (c == '/' && i + 1 < len && sql[i + 1] == '*')
+ {
+ i += 2;
+ int depth = 1;
+ while (i < len && depth > 0)
+ {
+ if (sql[i] == '/' && i + 1 < len && sql[i + 1] == '*')
+ {
+ depth++;
+ i += 2;
+ }
+ else if (sql[i] == '*' && i + 1 < len && sql[i + 1] == '/')
+ {
+ depth--;
+ i += 2;
+ }
+ else
+ {
+ i++;
+ }
+ }
+ continue;
+ }
+
+ // --- N'unicode string literal' ---
+ if ((c == 'N' || c == 'n') && i + 1 < len && sql[i + 1] == '\'')
+ {
+ i += 2;
+ SkipStringLiteral(sql, ref i);
+ AppendWithSpace(result, "{STR}");
+ continue;
+ }
+
+ // --- 'string literal' ---
+ if (c == '\'')
+ {
+ i++;
+ SkipStringLiteral(sql, ref i);
+ AppendWithSpace(result, "{STR}");
+ continue;
+ }
+
+ // --- Binary literal: 0xHEX ---
+ if (c == '0' && i + 1 < len && (sql[i + 1] == 'x' || sql[i + 1] == 'X'))
+ {
+ // Ensure it's not part of an identifier
+ if (i == 0 || !IsIdentChar(sql[i - 1]))
+ {
+ i += 2;
+ while (i < len && IsHexDigit(sql[i]))
+ i++;
+ AppendWithSpace(result, "{BS}");
+ continue;
+ }
+ }
+
+ // --- GUID literal: {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} ---
+ if (c == '{' && IsGuidLiteral(sql, i))
+ {
+ int end = sql.IndexOf('}', i);
+ if (end > i)
+ {
+ i = end + 1;
+ AppendWithSpace(result, "{GUID}");
+ continue;
+ }
+ }
+
+ // --- @variable or @@system_variable ---
+ if (c == '@')
+ {
+ i++;
+ if (i < len && sql[i] == '@')
+ {
+ // @@system_variable — preserve as-is
+ result.Append("@@");
+ i++;
+ while (i < len && IsIdentChar(sql[i]))
+ {
+ result.Append(char.ToUpperInvariant(sql[i]));
+ i++;
+ }
+ }
+ else
+ {
+ // @user_variable -> @P#
+ while (i < len && IsIdentChar(sql[i]))
+ i++;
+ AppendWithSpace(result, "@P#");
+ }
+ continue;
+ }
+
+ // --- Numeric literal ---
+ if (char.IsDigit(c) && (i == 0 || !IsIdentChar(sql[i - 1])))
+ {
+ bool hasDecimal = false;
+ while (i < len && char.IsDigit(sql[i]))
+ i++;
+
+ if (i < len && sql[i] == '.' && i + 1 < len && char.IsDigit(sql[i + 1]))
+ {
+ hasDecimal = true;
+ i++; // skip '.'
+ while (i < len && char.IsDigit(sql[i]))
+ i++;
+ }
+
+ // Skip scientific notation suffix (e.g., 1.5E+10)
+ if (i < len && (sql[i] == 'e' || sql[i] == 'E'))
+ {
+ i++;
+ if (i < len && (sql[i] == '+' || sql[i] == '-'))
+ i++;
+ while (i < len && char.IsDigit(sql[i]))
+ i++;
+ }
+
+ // Make sure this number isn't followed by an identifier char (e.g., part of a name like "table1")
+ if (i < len && IsIdentChar(sql[i]))
+ {
+ // It's part of an identifier — don't normalize, just output what we've consumed
+ // Actually, rewind and just output as identifier
+ // This is a simplification; the original lexer handles this more precisely
+ }
+
+ AppendWithSpace(result, hasDecimal ? "{##}.{##}" : "{##}");
+ continue;
+ }
+
+ // --- Whitespace collapsing ---
+ if (char.IsWhiteSpace(c))
+ {
+ if (result.Length > 0 && result[result.Length - 1] != ' ')
+ result.Append(' ');
+ i++;
+ while (i < len && char.IsWhiteSpace(sql[i]))
+ i++;
+ continue;
+ }
+
+ // --- Quoted identifier [name] — preserve as-is ---
+ if (c == '[')
+ {
+ // Check for showplan variable pattern [ExprNNNN] or [BMKNNNN]
+ if (IsShowplanVariable(sql, i))
+ {
+ result.Append('[');
+ i++;
+ // Output the alpha prefix, strip trailing digits
+ while (i < len && sql[i] != ']' && !char.IsDigit(sql[i]))
+ {
+ result.Append(char.ToUpperInvariant(sql[i]));
+ i++;
+ }
+ // Skip digits and closing bracket
+ while (i < len && sql[i] != ']')
+ i++;
+ if (i < len) i++; // skip ']'
+ result.Append(']');
+ continue;
+ }
+
+ // Regular quoted identifier — preserve
+ result.Append('[');
+ i++;
+ while (i < len && sql[i] != ']')
+ {
+ result.Append(char.ToUpperInvariant(sql[i]));
+ i++;
+ }
+ if (i < len)
+ {
+ result.Append(']');
+ i++; // skip ']'
+ }
+ continue;
+ }
+
+ // --- Regular character — uppercase ---
+ result.Append(char.ToUpperInvariant(c));
+ i++;
+ }
+
+ return result.ToString().Trim();
+ }
+
+ private static void SkipStringLiteral(string sql, ref int i)
+ {
+ int len = sql.Length;
+ while (i < len)
+ {
+ if (sql[i] == '\'')
+ {
+ i++;
+ // Escaped quote ''
+ if (i < len && sql[i] == '\'')
+ {
+ i++;
+ continue;
+ }
+ return;
+ }
+ i++;
+ }
+ }
+
+ private static bool IsHexDigit(char c)
+ {
+ return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F');
+ }
+
+ private static bool IsIdentChar(char c)
+ {
+ return char.IsLetterOrDigit(c) || c == '_' || c == '#' || c == '$';
+ }
+
+ private static bool IsGuidLiteral(string sql, int pos)
+ {
+ // {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} = 38 chars
+ if (pos + 37 >= sql.Length) return false;
+ if (sql[pos + 9] != '-') return false;
+ if (sql[pos + 14] != '-') return false;
+ if (sql[pos + 19] != '-') return false;
+ if (sql[pos + 24] != '-') return false;
+ if (sql[pos + 37] != '}') return false;
+ return true;
+ }
+
+ private static bool IsShowplanVariable(string sql, int pos)
+ {
+ // Matches [ExprNNNN] or [BMKNNNN] patterns
+ if (pos + 5 >= sql.Length) return false;
+ int i = pos + 1;
+ string prefix = "";
+ while (i < sql.Length && char.IsLetter(sql[i]))
+ {
+ prefix += sql[i];
+ i++;
+ }
+
+ if (prefix.Length == 0) return false;
+
+ string upper = prefix.ToUpperInvariant();
+ if (upper != "EXPR" && upper != "BMK" && upper != "UNION" && upper != "CONST")
+ return false;
+
+ // Must be followed by digits then ']'
+ if (i >= sql.Length || !char.IsDigit(sql[i])) return false;
+ while (i < sql.Length && char.IsDigit(sql[i])) i++;
+ return (i < sql.Length && sql[i] == ']');
+ }
+
+ private static void AppendWithSpace(StringBuilder sb, string text)
+ {
+ sb.Append(text);
+ }
+ }
+}
diff --git a/TraceEventImporter/Processing/Aggregator.cs b/TraceEventImporter/Processing/Aggregator.cs
new file mode 100644
index 00000000..b5c3dcf5
--- /dev/null
+++ b/TraceEventImporter/Processing/Aggregator.cs
@@ -0,0 +1,267 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+
+namespace TraceEventImporter.Processing
+{
+ /// <summary>
+ /// Builds tblTimeIntervals and computes per-(HashID, TimeInterval, DBID, AppNameID, LoginNameID)
+ /// aggregates for tblBatchPartialAggs and tblStmtPartialAggs.
+ /// </summary>
+ public class Aggregator
+ {
+ // Width of each aggregation bucket in seconds; non-positive inputs fall back to 60.
+ private readonly int _intervalSeconds;
+
+ // NOTE(review): the generic type arguments on these collections appear to have
+ // been stripped from the patch text ("List" with no element type); they must be
+ // restored (e.g. List<TimeIntervalRow>, List<BatchPartialAggRow>,
+ // List<StmtPartialAggRow>) before this patch can compile.
+ public List TimeIntervals { get; } = new List();
+ public List BatchAggs { get; } = new List();
+ public List StmtAggs { get; } = new List();
+
+ public Aggregator(int intervalSeconds = 60)
+ {
+ _intervalSeconds = intervalSeconds > 0 ? intervalSeconds : 60;
+ }
+
+ /// <summary>
+ /// Entry point: scans both row sets for the overall [min, max) time range,
+ /// builds the interval table, then folds every batch and statement row into
+ /// its aggregate bucket. A degenerate range (min >= max — e.g. no row carried
+ /// any timestamp) produces no intervals and no aggregates.
+ /// </summary>
+ public void Compute(List batches, List statements)
+ {
+ // Determine overall time range
+ DateTime minTime = DateTime.MaxValue;
+ DateTime maxTime = DateTime.MinValue;
+
+ foreach (var b in batches)
+ {
+ if (b.StartTime.HasValue && b.StartTime.Value < minTime) minTime = b.StartTime.Value;
+ if (b.EndTime.HasValue && b.EndTime.Value > maxTime) maxTime = b.EndTime.Value;
+ }
+ foreach (var s in statements)
+ {
+ if (s.StartTime.HasValue && s.StartTime.Value < minTime) minTime = s.StartTime.Value;
+ if (s.EndTime.HasValue && s.EndTime.Value > maxTime) maxTime = s.EndTime.Value;
+ }
+
+ // No usable range (also covers "no timed rows at all", where min/max keep
+ // their MaxValue/MinValue sentinels).
+ if (minTime >= maxTime)
+ return;
+
+ // Build time intervals
+ BuildTimeIntervals(minTime, maxTime);
+
+ // Build batch aggregations
+ ComputeBatchAggs(batches);
+
+ // Build statement aggregations
+ ComputeStmtAggs(statements);
+ }
+
+ /// <summary>
+ /// Slices [minTime, maxTime) into consecutive buckets of _intervalSeconds
+ /// each, numbered from 1; the final bucket is truncated so it never extends
+ /// past maxTime.
+ /// </summary>
+ private void BuildTimeIntervals(DateTime minTime, DateTime maxTime)
+ {
+     int id = 1;
+     for (DateTime start = minTime; start < maxTime; )
+     {
+         DateTime end = start.AddSeconds(_intervalSeconds);
+         if (end > maxTime)
+             end = maxTime;
+
+         var row = new TimeIntervalRow();
+         row.TimeInterval = id;
+         row.StartTime = start;
+         row.EndTime = end;
+         TimeIntervals.Add(row);
+
+         id++;
+         start = end;
+     }
+ }
+
+ /// <summary>
+ /// Folds each batch row into the aggregate row keyed by
+ /// (HashID, TimeInterval, DBID, AppNameID, LoginNameID). The bucket is chosen
+ /// by the batch's EndTime, falling back to StartTime. Min/Max/Total metrics
+ /// accumulate only when the corresponding source value is present.
+ /// </summary>
+ private void ComputeBatchAggs(List batches)
+ {
+ // Group by HashID, TimeInterval, DBID, AppNameID, LoginNameID
+ foreach (var batch in batches)
+ {
+ int timeInterval = FindTimeInterval(batch.EndTime ?? batch.StartTime);
+ if (timeInterval <= 0) continue;
+
+ var key = new AggKey(batch.HashID, timeInterval, batch.DBID, batch.AppNameID, batch.LoginNameID);
+
+ // Lazily create the aggregate row for this grouping key.
+ if (!_batchAggDict.TryGetValue(key, out var agg))
+ {
+ agg = new BatchPartialAggRow
+ {
+ HashID = batch.HashID,
+ TimeInterval = timeInterval,
+ DBID = batch.DBID,
+ AppNameID = batch.AppNameID,
+ LoginNameID = batch.LoginNameID
+ };
+ _batchAggDict[key] = agg;
+ BatchAggs.Add(agg);
+ }
+
+ // Count events
+ if (batch.StartTime.HasValue)
+ agg.StartingEvents++;
+ if (batch.EndTime.HasValue)
+ agg.CompletedEvents++;
+ if (batch.AttnSeq.HasValue)
+ agg.AttentionEvents++;
+
+ // Aggregate metrics (from completed events only)
+ if (batch.Duration.HasValue)
+ {
+ agg.TotalDuration = (agg.TotalDuration ?? 0) + batch.Duration.Value;
+ agg.MinDuration = agg.MinDuration.HasValue ? Math.Min(agg.MinDuration.Value, batch.Duration.Value) : batch.Duration.Value;
+ agg.MaxDuration = agg.MaxDuration.HasValue ? Math.Max(agg.MaxDuration.Value, batch.Duration.Value) : batch.Duration.Value;
+ }
+ if (batch.Reads.HasValue)
+ {
+ agg.TotalReads = (agg.TotalReads ?? 0) + batch.Reads.Value;
+ agg.MinReads = agg.MinReads.HasValue ? Math.Min(agg.MinReads.Value, batch.Reads.Value) : batch.Reads.Value;
+ agg.MaxReads = agg.MaxReads.HasValue ? Math.Max(agg.MaxReads.Value, batch.Reads.Value) : batch.Reads.Value;
+ }
+ if (batch.Writes.HasValue)
+ {
+ agg.TotalWrites = (agg.TotalWrites ?? 0) + batch.Writes.Value;
+ agg.MinWrites = agg.MinWrites.HasValue ? Math.Min(agg.MinWrites.Value, batch.Writes.Value) : batch.Writes.Value;
+ agg.MaxWrites = agg.MaxWrites.HasValue ? Math.Max(agg.MaxWrites.Value, batch.Writes.Value) : batch.Writes.Value;
+ }
+ if (batch.CPU.HasValue)
+ {
+ agg.TotalCPU = (agg.TotalCPU ?? 0) + batch.CPU.Value;
+ agg.MinCPU = agg.MinCPU.HasValue ? Math.Min(agg.MinCPU.Value, batch.CPU.Value) : batch.CPU.Value;
+ agg.MaxCPU = agg.MaxCPU.HasValue ? Math.Max(agg.MaxCPU.Value, batch.CPU.Value) : batch.CPU.Value;
+ }
+ }
+ }
+
+ /// <summary>
+ /// Statement counterpart of ComputeBatchAggs: same grouping key plus the
+ /// statement's ObjectID carried onto the aggregate row. The bucket is chosen
+ /// by EndTime, falling back to StartTime.
+ /// NOTE(review): this duplicates the Min/Max/Total folding logic of
+ /// ComputeBatchAggs almost verbatim — a shared helper would keep the two
+ /// in sync if the metric set ever changes.
+ /// </summary>
+ private void ComputeStmtAggs(List statements)
+ {
+ foreach (var stmt in statements)
+ {
+ int timeInterval = FindTimeInterval(stmt.EndTime ?? stmt.StartTime);
+ if (timeInterval <= 0) continue;
+
+ var key = new AggKey(stmt.HashID, timeInterval, stmt.DBID, stmt.AppNameID, stmt.LoginNameID);
+
+ // Lazily create the aggregate row for this grouping key.
+ if (!_stmtAggDict.TryGetValue(key, out var agg))
+ {
+ agg = new StmtPartialAggRow
+ {
+ HashID = stmt.HashID,
+ TimeInterval = timeInterval,
+ ObjectID = stmt.ObjectID,
+ DBID = stmt.DBID,
+ AppNameID = stmt.AppNameID,
+ LoginNameID = stmt.LoginNameID
+ };
+ _stmtAggDict[key] = agg;
+ StmtAggs.Add(agg);
+ }
+
+ if (stmt.StartTime.HasValue) agg.StartingEvents++;
+ if (stmt.EndTime.HasValue) agg.CompletedEvents++;
+ if (stmt.AttnSeq.HasValue) agg.AttentionEvents++;
+
+ if (stmt.Duration.HasValue)
+ {
+ agg.TotalDuration = (agg.TotalDuration ?? 0) + stmt.Duration.Value;
+ agg.MinDuration = agg.MinDuration.HasValue ? Math.Min(agg.MinDuration.Value, stmt.Duration.Value) : stmt.Duration.Value;
+ agg.MaxDuration = agg.MaxDuration.HasValue ? Math.Max(agg.MaxDuration.Value, stmt.Duration.Value) : stmt.Duration.Value;
+ }
+ if (stmt.Reads.HasValue)
+ {
+ agg.TotalReads = (agg.TotalReads ?? 0) + stmt.Reads.Value;
+ agg.MinReads = agg.MinReads.HasValue ? Math.Min(agg.MinReads.Value, stmt.Reads.Value) : stmt.Reads.Value;
+ agg.MaxReads = agg.MaxReads.HasValue ? Math.Max(agg.MaxReads.Value, stmt.Reads.Value) : stmt.Reads.Value;
+ }
+ if (stmt.Writes.HasValue)
+ {
+ agg.TotalWrites = (agg.TotalWrites ?? 0) + stmt.Writes.Value;
+ agg.MinWrites = agg.MinWrites.HasValue ? Math.Min(agg.MinWrites.Value, stmt.Writes.Value) : stmt.Writes.Value;
+ agg.MaxWrites = agg.MaxWrites.HasValue ? Math.Max(agg.MaxWrites.Value, stmt.Writes.Value) : stmt.Writes.Value;
+ }
+ if (stmt.CPU.HasValue)
+ {
+ agg.TotalCPU = (agg.TotalCPU ?? 0) + stmt.CPU.Value;
+ agg.MinCPU = agg.MinCPU.HasValue ? Math.Min(agg.MinCPU.Value, stmt.CPU.Value) : stmt.CPU.Value;
+ agg.MaxCPU = agg.MaxCPU.HasValue ? Math.Max(agg.MaxCPU.Value, stmt.CPU.Value) : stmt.CPU.Value;
+ }
+ }
+ }
+
+ /// <summary>
+ /// Maps a timestamp to its TimeInterval id, or 0 when there is no timestamp
+ /// or no intervals exist. Fix: the previous linear scan made aggregation
+ /// O(events × intervals); since BuildTimeIntervals produces contiguous
+ /// buckets of uniform _intervalSeconds width (only the last may be
+ /// truncated), the bucket index can be computed arithmetically in O(1).
+ /// Times at or past the end of the last interval map to the last interval
+ /// (mirrors the old fallback).
+ /// </summary>
+ private int FindTimeInterval(DateTime? time)
+ {
+     if (!time.HasValue || TimeIntervals.Count == 0) return 0;
+
+     double offsetSeconds = (time.Value - TimeIntervals[0].StartTime).TotalSeconds;
+     int index = (int)(offsetSeconds / _intervalSeconds);
+
+     // Clamp to the valid range; a truncated final interval still lands on
+     // index Count-1 because its start is at a whole multiple of the width.
+     if (index < 0) index = 0;
+     if (index >= TimeIntervals.Count) index = TimeIntervals.Count - 1;
+
+     return TimeIntervals[index].TimeInterval;
+ }
+
+ // O(1) lookup of the aggregate row for each grouping key.
+ // NOTE(review): the generic type arguments below were reconstructed from
+ // usage — the patch text had all angle-bracketed spans stripped.
+ private readonly Dictionary<AggKey, BatchPartialAggRow> _batchAggDict = new Dictionary<AggKey, BatchPartialAggRow>();
+ private readonly Dictionary<AggKey, StmtPartialAggRow> _stmtAggDict = new Dictionary<AggKey, StmtPartialAggRow>();
+
+ /// <summary>
+ /// Value-type composite grouping key:
+ /// (HashID, TimeInterval, DBID, AppNameID, LoginNameID).
+ /// Implements IEquatable to avoid boxing in dictionary lookups.
+ /// </summary>
+ private struct AggKey : IEquatable<AggKey>
+ {
+     public readonly long HashID;
+     public readonly int TimeInterval;
+     public readonly int DBID;
+     public readonly int AppNameID;
+     public readonly int LoginNameID;
+
+     public AggKey(long hashId, int ti, int dbid, int appId, int loginId)
+     {
+         HashID = hashId; TimeInterval = ti; DBID = dbid; AppNameID = appId; LoginNameID = loginId;
+     }
+
+     public bool Equals(AggKey o) => HashID == o.HashID && TimeInterval == o.TimeInterval && DBID == o.DBID && AppNameID == o.AppNameID && LoginNameID == o.LoginNameID;
+     public override bool Equals(object obj) => obj is AggKey k && Equals(k);
+     public override int GetHashCode() => HashID.GetHashCode() ^ (TimeInterval * 397) ^ (DBID * 31) ^ AppNameID ^ LoginNameID;
+ }
+ }
+
+ #region Aggregation Row Types
+
+ /// <summary>
+ /// One row of tblTimeIntervals: a [StartTime, EndTime) bucket identified by
+ /// its 1-based TimeInterval ordinal.
+ /// </summary>
+ public class TimeIntervalRow
+ {
+ public int TimeInterval;
+ public DateTime StartTime;
+ public DateTime EndTime;
+ }
+
+ /// <summary>
+ /// One row of tblBatchPartialAggs: event counts and nullable Min/Max/Total
+ /// metrics for a (HashID, TimeInterval, DBID, AppNameID, LoginNameID) group.
+ /// Metric fields stay null until the first batch in the group supplies a value.
+ /// </summary>
+ public class BatchPartialAggRow
+ {
+ public long HashID;
+ public int TimeInterval;
+ public int StartingEvents;
+ public int CompletedEvents;
+ public int AttentionEvents;
+ public long? MinDuration, MaxDuration, TotalDuration;
+ public long? MinReads, MaxReads, TotalReads;
+ public long? MinWrites, MaxWrites, TotalWrites;
+ public long? MinCPU, MaxCPU, TotalCPU;
+ public int AppNameID;
+ public int LoginNameID;
+ public int DBID;
+ }
+
+ /// <summary>
+ /// One row of tblStmtPartialAggs: same shape as BatchPartialAggRow plus the
+ /// statement's ObjectID. Metric fields stay null until the first statement in
+ /// the group supplies a value.
+ /// </summary>
+ public class StmtPartialAggRow
+ {
+ public long HashID;
+ public int TimeInterval;
+ public int? ObjectID;
+ public int? DBID;
+ public int AppNameID;
+ public int LoginNameID;
+ public int StartingEvents;
+ public int CompletedEvents;
+ public int AttentionEvents;
+ public long? MinDuration, MaxDuration, TotalDuration;
+ public long? MinReads, MaxReads, TotalReads;
+ public long? MinWrites, MaxWrites, TotalWrites;
+ public long? MinCPU, MaxCPU, TotalCPU;
+ }
+
+ #endregion
+}
diff --git a/TraceEventImporter/Processing/EventProcessor.cs b/TraceEventImporter/Processing/EventProcessor.cs
new file mode 100644
index 00000000..9f0bc781
--- /dev/null
+++ b/TraceEventImporter/Processing/EventProcessor.cs
@@ -0,0 +1,546 @@
+using System;
+using System.Collections.Generic;
+using TraceEventImporter.Models;
+using TraceEventImporter.Normalization;
+
+namespace TraceEventImporter.Processing
+{
+ /// <summary>
+ /// Correlates Starting↔Completed events per session+request, normalizes SQL text,
+ /// computes HashIDs, and produces rows for tblBatches, tblStatements, tblConnections,
+ /// and tblInterestingEvents.
+ /// </summary>
+ public class EventProcessor
+ {
+ private readonly UniqueStore _store;
+
+ // NOTE(review): all generic type arguments below were reconstructed from
+ // usage — the patch text had angle-bracketed spans stripped.
+
+ // Pending batch events awaiting their Completed counterpart
+ private readonly Dictionary<SessionRequestKey, PendingBatch> _pendingBatches =
+     new Dictionary<SessionRequestKey, PendingBatch>();
+
+ // Pending statement events (nested via stack per session+request)
+ private readonly Dictionary<SessionRequestKey, Stack<PendingStatement>> _pendingStatements =
+     new Dictionary<SessionRequestKey, Stack<PendingStatement>>();
+
+ // Connection tracking (login→logout), keyed by session id
+ private readonly Dictionary<int, ConnectionInfo> _connections =
+     new Dictionary<int, ConnectionInfo>();
+
+ // Sequence counters
+ private long _batchSeq;
+ private long _stmtSeq;
+ private long _connSeq;
+
+ // Collected rows for bulk insert
+ public List<BatchRow> Batches { get; } = new List<BatchRow>();
+ public List<StatementRow> Statements { get; } = new List<StatementRow>();
+ public List<ConnectionRow> Connections { get; } = new List<ConnectionRow>();
+ public List<InterestingEventRow> InterestingEvents { get; } = new List<InterestingEventRow>();
+
+ public EventProcessor(UniqueStore store)
+ {
+     // Fail fast: every handler dereferences _store.
+     _store = store ?? throw new ArgumentNullException(nameof(store));
+ }
+
+ /// <summary>
+ /// Dispatches a single trace event to the matching handler, after recording
+ /// its EventId in the unique store's traced-event set. Events of type
+ /// Unknown are dropped; any other unhandled type is captured as an
+ /// "interesting" event (recompile, autogrow, etc.).
+ /// </summary>
+ public void ProcessEvent(TraceEvent evt)
+ {
+ _store.AddTracedEvent(evt.EventId);
+
+ switch (evt.EventType)
+ {
+ case TraceEventType.SqlBatchStarting:
+ case TraceEventType.RpcStarting:
+ HandleBatchStarting(evt);
+ break;
+
+ case TraceEventType.SqlBatchCompleted:
+ case TraceEventType.RpcCompleted:
+ HandleBatchCompleted(evt);
+ break;
+
+ case TraceEventType.SpStmtStarting:
+ case TraceEventType.StmtStarting:
+ HandleStatementStarting(evt);
+ break;
+
+ case TraceEventType.SpStmtCompleted:
+ case TraceEventType.StmtCompleted:
+ HandleStatementCompleted(evt);
+ break;
+
+ case TraceEventType.AuditLogin:
+ case TraceEventType.ExistingConnection:
+ HandleLogin(evt);
+ break;
+
+ case TraceEventType.AuditLogout:
+ HandleLogout(evt);
+ break;
+
+ case TraceEventType.Attention:
+ HandleAttention(evt);
+ break;
+
+ default:
+ // Interesting events (recompile, autogrow, etc.)
+ if (evt.EventType != TraceEventType.Unknown)
+ HandleInterestingEvent(evt);
+ break;
+ }
+ }
+
+ /// <summary>
+ /// Flushes any pending connections that never got a logout event; they are
+ /// emitted as ConnectionRows with null EndTime/Duration/metrics.
+ /// </summary>
+ /// <remarks>
+ /// NOTE(review): a public method named Finalize shadows the object-finalizer
+ /// pattern and raises compiler warning CS0465 — consider renaming it (e.g.
+ /// Flush or Complete) before callers proliferate.
+ /// </remarks>
+ public void Finalize()
+ {
+ foreach (var conn in _connections.Values)
+ {
+ Connections.Add(new ConnectionRow
+ {
+ ConnSeq = conn.ConnSeq,
+ Session = conn.SessionId,
+ StartTime = conn.StartTime,
+ EndTime = null,
+ Duration = null,
+ Reads = null,
+ Writes = null,
+ CPU = null,
+ ApplicationName = conn.AppName,
+ LoginName = conn.LoginName,
+ HostName = conn.HostName,
+ NTDomainName = conn.NTDomainName,
+ NTUserName = conn.NTUserName,
+ StartSeq = conn.StartSeq,
+ EndSeq = null,
+ TextData = conn.TextData
+ });
+ }
+ _connections.Clear();
+ }
+
+ #region Batch Handling
+
+ /// <summary>
+ /// Records (or overwrites) the in-flight batch for this (session, request);
+ /// HandleBatchCompleted later picks it up and removes it.
+ /// </summary>
+ private void HandleBatchStarting(TraceEvent evt)
+ {
+     var pending = new PendingBatch();
+     pending.StartSeq = evt.Seq;
+     pending.StartTime = evt.StartTime;
+     pending.TextData = evt.TextData;
+     pending.ObjectName = evt.ObjectName;
+     pending.IsRpc = evt.IsRpcEvent;
+     pending.DatabaseId = evt.DatabaseId;
+     pending.ApplicationName = evt.ApplicationName;
+     pending.LoginName = evt.LoginName;
+
+     _pendingBatches[new SessionRequestKey(evt.SessionId, evt.RequestId)] = pending;
+ }
+
+ /// <summary>
+ /// Emits a BatchRow for a completed batch/RPC: pairs the event with its
+ /// pending Starting counterpart (if any), normalizes the text, computes the
+ /// HashID (seeded with the special-proc id for RPCs), and registers the
+ /// unique batch text, procedure name, app name and login name in the store.
+ /// Works standalone when no Starting event was captured: text/object/RPC
+ /// flag come from the Completed event and StartTime falls back to it.
+ /// </summary>
+ private void HandleBatchCompleted(TraceEvent evt)
+ {
+ var key = new SessionRequestKey(evt.SessionId, evt.RequestId);
+ long batchSeq = ++_batchSeq;
+
+ PendingBatch pending = null;
+ _pendingBatches.TryGetValue(key, out pending);
+ if (pending != null)
+ _pendingBatches.Remove(key);
+
+ // Use completed event's text, fall back to starting event's text
+ string textData = evt.TextData ?? pending?.TextData;
+ string objectName = evt.ObjectName ?? pending?.ObjectName;
+ bool isRpc = evt.IsRpcEvent || (pending?.IsRpc ?? false);
+
+ // Determine special proc and normalize
+ byte specialProcId = isRpc ? SpecialProcDetector.GetSpecialProcId(objectName) : (byte)0;
+ string normText = SqlTextNormalizer.Normalize(textData);
+ long hashId = HashComputer.ComputeHash(normText, specialProcId);
+
+ // Add to unique store
+ _store.TryAddBatch(hashId, textData, normText, specialProcId);
+
+ // Track procedure name
+ if (isRpc && !string.IsNullOrEmpty(objectName))
+ {
+ _store.AddProcedureName(
+ evt.DatabaseId,
+ evt.ObjectId ?? 0,
+ specialProcId,
+ objectName);
+ }
+
+ int appNameId = _store.GetOrAddAppName(evt.ApplicationName ?? pending?.ApplicationName);
+ int loginNameId = _store.GetOrAddLoginName(evt.LoginName ?? pending?.LoginName);
+
+ var row = new BatchRow
+ {
+ BatchSeq = batchSeq,
+ HashID = hashId,
+ Session = evt.SessionId,
+ Request = evt.RequestId,
+ ConnId = evt.ConnId,
+ StartTime = pending?.StartTime ?? evt.StartTime,
+ EndTime = evt.EndTime ?? evt.StartTime,
+ Duration = evt.Duration,
+ Reads = evt.Reads,
+ Writes = evt.Writes,
+ CPU = evt.CPU,
+ fRPCEvent = (byte)(isRpc ? 1 : 0),
+ DBID = evt.DatabaseId,
+ StartSeq = pending?.StartSeq,
+ EndSeq = evt.Seq,
+ AttnSeq = null,
+ ConnSeq = null,
+ TextData = textData,
+ OrigRowCount = evt.RowCount,
+ AppNameID = appNameId,
+ LoginNameID = loginNameId
+ };
+
+ Batches.Add(row);
+ }
+
+ #endregion
+
+ #region Statement Handling
+
+ /// <summary>
+ /// Pushes the starting statement onto the per-(session, request) stack so
+ /// nested statements (stored-proc levels) are completed in LIFO order.
+ /// Fix: replaces the ContainsKey + double indexer pattern (three dictionary
+ /// lookups) with a single TryGetValue. NOTE(review): the Stack's element
+ /// type was reconstructed; the patch text had the generic argument stripped.
+ /// </summary>
+ private void HandleStatementStarting(TraceEvent evt)
+ {
+     var key = new SessionRequestKey(evt.SessionId, evt.RequestId);
+
+     Stack<PendingStatement> stack;
+     if (!_pendingStatements.TryGetValue(key, out stack))
+     {
+         stack = new Stack<PendingStatement>();
+         _pendingStatements[key] = stack;
+     }
+
+     stack.Push(new PendingStatement
+     {
+         StartSeq = evt.Seq,
+         StartTime = evt.StartTime,
+         TextData = evt.TextData,
+         ObjectId = evt.ObjectId,
+         NestLevel = evt.NestLevel
+     });
+ }
+
+ /// <summary>
+ /// Emits a StatementRow for a completed statement: pops its Starting
+ /// counterpart from the per-(session, request) stack (if any), normalizes
+ /// and hashes the text, registers it in the unique store, and links the row
+ /// to the most recently emitted batch on the same session+request.
+ /// NOTE(review): the backward scan over Batches is O(batches) per completed
+ /// statement — O(n²) on large traces; a per-(session, request) map of the
+ /// latest BatchSeq would make the parent lookup O(1).
+ /// </summary>
+ private void HandleStatementCompleted(TraceEvent evt)
+ {
+ var key = new SessionRequestKey(evt.SessionId, evt.RequestId);
+ long stmtSeq = ++_stmtSeq;
+
+ PendingStatement pending = null;
+ if (_pendingStatements.TryGetValue(key, out var stack) && stack.Count > 0)
+ pending = stack.Pop();
+
+ string textData = evt.TextData ?? pending?.TextData;
+ string normText = SqlTextNormalizer.Normalize(textData);
+ long hashId = HashComputer.ComputeHash(normText);
+
+ _store.TryAddStatement(hashId, textData, normText);
+
+ // Find the parent batch
+ long? batchSeq = null;
+ if (Batches.Count > 0)
+ {
+ // Find the most recent batch for this session+request
+ for (int i = Batches.Count - 1; i >= 0; i--)
+ {
+ if (Batches[i].Session == evt.SessionId && Batches[i].Request == evt.RequestId)
+ {
+ batchSeq = Batches[i].BatchSeq;
+ break;
+ }
+ }
+ }
+
+ int appNameId = _store.GetOrAddAppName(evt.ApplicationName);
+ int loginNameId = _store.GetOrAddLoginName(evt.LoginName);
+
+ var row = new StatementRow
+ {
+ StmtSeq = stmtSeq,
+ HashID = hashId,
+ Session = evt.SessionId,
+ Request = evt.RequestId,
+ ConnId = evt.ConnId,
+ StartTime = pending?.StartTime ?? evt.StartTime,
+ EndTime = evt.EndTime ?? evt.StartTime,
+ Duration = evt.Duration,
+ Reads = evt.Reads,
+ Writes = evt.Writes,
+ CPU = evt.CPU,
+ Rows = evt.RowCount,
+ DBID = evt.DatabaseId,
+ ObjectID = evt.ObjectId ?? pending?.ObjectId,
+ NestLevel = evt.NestLevel ?? pending?.NestLevel,
+ fDynamicSQL = false,
+ StartSeq = pending?.StartSeq,
+ EndSeq = evt.Seq,
+ ConnSeq = null,
+ BatchSeq = batchSeq,
+ ParentStmtSeq = null, // Filled in post-load fixups
+ AttnSeq = null,
+ TextData = textData,
+ AppNameID = appNameId,
+ LoginNameID = loginNameId
+ };
+
+ Statements.Add(row);
+ }
+
+ #endregion
+
+ #region Connection Handling
+
+ /// <summary>
+ /// Starts tracking a connection on AuditLogin / ExistingConnection.
+ /// NOTE(review): the indexer assignment silently replaces any existing
+ /// entry for the same session that never received a logout, so that earlier
+ /// connection's row is lost (it is no longer present when Finalize flushes
+ /// the remainder) — confirm whether a duplicate login should first flush
+ /// the old entry.
+ /// </summary>
+ private void HandleLogin(TraceEvent evt)
+ {
+ long connSeq = ++_connSeq;
+ _connections[evt.SessionId] = new ConnectionInfo
+ {
+ ConnSeq = connSeq,
+ SessionId = evt.SessionId,
+ StartTime = evt.StartTime,
+ StartSeq = evt.Seq,
+ AppName = evt.ApplicationName,
+ LoginName = evt.LoginName,
+ HostName = evt.HostName,
+ NTDomainName = evt.NTDomainName,
+ NTUserName = evt.NTUserName,
+ TextData = evt.TextData
+ };
+ }
+
+ /// <summary>
+ /// Completes a tracked connection on AuditLogout: combines the stored login
+ /// data with the logout event's end time and metrics, emits the
+ /// ConnectionRow, and stops tracking the session. Logouts for untracked
+ /// sessions are ignored.
+ /// </summary>
+ private void HandleLogout(TraceEvent evt)
+ {
+ if (_connections.TryGetValue(evt.SessionId, out var conn))
+ {
+ Connections.Add(new ConnectionRow
+ {
+ ConnSeq = conn.ConnSeq,
+ Session = conn.SessionId,
+ StartTime = conn.StartTime,
+ EndTime = evt.StartTime,
+ Duration = evt.Duration,
+ Reads = evt.Reads,
+ Writes = evt.Writes,
+ CPU = evt.CPU,
+ ApplicationName = conn.AppName,
+ LoginName = conn.LoginName,
+ HostName = conn.HostName,
+ NTDomainName = conn.NTDomainName,
+ NTUserName = conn.NTUserName,
+ StartSeq = conn.StartSeq,
+ EndSeq = evt.Seq,
+ TextData = conn.TextData
+ });
+ _connections.Remove(evt.SessionId);
+ }
+ }
+
+ #endregion
+
+ #region Attention & Interesting Events
+
+ /// <summary>
+ /// Tags the most recent batch on this session that has not yet been marked
+ /// with an attention sequence; does nothing if no such batch exists.
+ /// </summary>
+ private void HandleAttention(TraceEvent evt)
+ {
+     int index = Batches.Count;
+     while (index-- > 0)
+     {
+         var candidate = Batches[index];
+         if (candidate.Session != evt.SessionId || candidate.AttnSeq != null)
+             continue;
+
+         candidate.AttnSeq = evt.Seq;
+         return;
+     }
+ }
+
+ /// <summary>
+ /// Records a non-batch/non-statement event (e.g. recompile, autogrow) as an
+ /// InterestingEventRow, linked to the most recently emitted batch on the
+ /// same session (if any). TextData is clipped to 1000 characters.
+ /// </summary>
+ private void HandleInterestingEvent(TraceEvent evt)
+ {
+ // Find parent batch
+ long? batchSeq = null;
+ for (int i = Batches.Count - 1; i >= 0; i--)
+ {
+ if (Batches[i].Session == evt.SessionId)
+ {
+ batchSeq = Batches[i].BatchSeq;
+ break;
+ }
+ }
+
+ InterestingEvents.Add(new InterestingEventRow
+ {
+ Seq = evt.Seq,
+ EventID = evt.EventId,
+ Session = evt.SessionId,
+ Request = evt.RequestId,
+ ConnId = evt.ConnId,
+ StartTime = evt.StartTime,
+ EndTime = evt.EndTime,
+ Duration = evt.Duration,
+ DBID = evt.DatabaseId,
+ IntegerData = evt.IntegerData,
+ EventSubclass = evt.EventSubclass,
+ TextData = evt.TextData?.Length > 1000
+ ? evt.TextData.Substring(0, 1000)
+ : evt.TextData,
+ ObjectID = evt.ObjectId,
+ Error = evt.Error,
+ BatchSeq = batchSeq,
+ Severity = evt.Severity,
+ State = evt.State
+ });
+ }
+
+ #endregion
+ }
+
+ #region Row Types
+
+ /// <summary>
+ /// One row destined for tblBatches (see EventProcessor summary), produced
+ /// per completed batch/RPC. Nullable fields stay null when the corresponding
+ /// event data was absent (e.g. StartSeq when no Starting event was seen).
+ /// </summary>
+ public class BatchRow
+ {
+ public long BatchSeq;
+ public long HashID;
+ public int Session;
+ public int Request;
+ public long ConnId;
+ public DateTime? StartTime;
+ public DateTime? EndTime;
+ public long? Duration;
+ public long? Reads;
+ public long? Writes;
+ public long? CPU;
+ public byte fRPCEvent;
+ public int DBID;
+ public long? StartSeq;
+ public long? EndSeq;
+ public long? AttnSeq;
+ public long? ConnSeq;
+ public string TextData;
+ public long? OrigRowCount;
+ // For aggregation — not stored in table
+ public int AppNameID;
+ public int LoginNameID;
+ }
+
+ /// <summary>
+ /// One row destined for tblStatements (see EventProcessor summary),
+ /// produced per completed statement. BatchSeq links to the parent BatchRow;
+ /// ParentStmtSeq is left null here and filled in by post-load fixups.
+ /// </summary>
+ public class StatementRow
+ {
+ public long StmtSeq;
+ public long HashID;
+ public int Session;
+ public int Request;
+ public long ConnId;
+ public DateTime? StartTime;
+ public DateTime? EndTime;
+ public long? Duration;
+ public long? Reads;
+ public long? Writes;
+ public long? CPU;
+ public long? Rows;
+ public int DBID;
+ public int? ObjectID;
+ public int? NestLevel;
+ public bool fDynamicSQL;
+ public long? StartSeq;
+ public long? EndSeq;
+ public long? ConnSeq;
+ public long? BatchSeq;
+ public long? ParentStmtSeq;
+ public long? AttnSeq;
+ public string TextData;
+ // For aggregation — not stored in table
+ public int AppNameID;
+ public int LoginNameID;
+ }
+
+ /// <summary>
+ /// One row destined for tblConnections (see EventProcessor summary).
+ /// EndTime/Duration/metrics and EndSeq are null for connections that were
+ /// still open when the trace ended (flushed by Finalize).
+ /// </summary>
+ public class ConnectionRow
+ {
+ public long ConnSeq;
+ public int Session;
+ public DateTime? StartTime;
+ public DateTime? EndTime;
+ public long? Duration;
+ public long? Reads;
+ public long? Writes;
+ public long? CPU;
+ public string ApplicationName;
+ public string LoginName;
+ public string HostName;
+ public string NTDomainName;
+ public string NTUserName;
+ public long? StartSeq;
+ public long? EndSeq;
+ public string TextData;
+ }
+
+ /// <summary>
+ /// One row destined for tblInterestingEvents (see EventProcessor summary).
+ /// TextData is clipped to 1000 characters by HandleInterestingEvent;
+ /// BatchSeq links to the most recent batch on the same session, when any.
+ /// </summary>
+ public class InterestingEventRow
+ {
+ public long Seq;
+ public int EventID;
+ public int Session;
+ public int Request;
+ public long ConnId;
+ public DateTime? StartTime;
+ public DateTime? EndTime;
+ public long? Duration;
+ public int DBID;
+ public int? IntegerData;
+ public int? EventSubclass;
+ public string TextData;
+ public int? ObjectID;
+ public int? Error;
+ public long? BatchSeq;
+ public int? Severity;
+ public int? State;
+ }
+
+ /// <summary>
+ /// Value-type dictionary key pairing a session id with a request id.
+ /// Implements IEquatable to avoid boxing in dictionary lookups.
+ /// NOTE(review): the IEquatable generic argument was reconstructed; the
+ /// patch text had it stripped.
+ /// </summary>
+ internal struct SessionRequestKey : IEquatable<SessionRequestKey>
+ {
+     public readonly int Session;
+     public readonly int Request;
+
+     public SessionRequestKey(int session, int request)
+     {
+         Session = session;
+         Request = request;
+     }
+
+     public bool Equals(SessionRequestKey other) => Session == other.Session && Request == other.Request;
+     public override bool Equals(object obj) => obj is SessionRequestKey k && Equals(k);
+     public override int GetHashCode() => (Session * 397) ^ Request;
+ }
+
+ /// <summary>
+ /// Data captured from a Batch/RPC Starting event, held until its Completed
+ /// counterpart arrives (see HandleBatchStarting / HandleBatchCompleted).
+ /// </summary>
+ internal class PendingBatch
+ {
+ public long StartSeq;
+ public DateTime? StartTime;
+ public string TextData;
+ public string ObjectName;
+ public bool IsRpc;
+ public int DatabaseId;
+ public string ApplicationName;
+ public string LoginName;
+ }
+
+ /// <summary>
+ /// Data captured from a Statement Starting event, stacked per
+ /// (session, request) until the matching Completed event pops it.
+ /// </summary>
+ internal class PendingStatement
+ {
+ public long StartSeq;
+ public DateTime? StartTime;
+ public string TextData;
+ public int? ObjectId;
+ public int? NestLevel;
+ }
+
+ /// <summary>
+ /// Data captured from a login / existing-connection event, held per session
+ /// until the matching logout (or until Finalize flushes the remainder).
+ /// </summary>
+ internal class ConnectionInfo
+ {
+ public long ConnSeq;
+ public int SessionId;
+ public DateTime? StartTime;
+ public long StartSeq;
+ public string AppName;
+ public string LoginName;
+ public string HostName;
+ public string NTDomainName;
+ public string NTUserName;
+ public string TextData;
+ }
+
+ #endregion
+}
diff --git a/TraceEventImporter/Processing/SpecialProcDetector.cs b/TraceEventImporter/Processing/SpecialProcDetector.cs
new file mode 100644
index 00000000..c37947da
--- /dev/null
+++ b/TraceEventImporter/Processing/SpecialProcDetector.cs
@@ -0,0 +1,64 @@
+using System;
+using System.Collections.Generic;
+
+namespace TraceEventImporter.Processing
+{
+ /// <summary>
+ /// Detects the 17 special stored procedures that get special handling during
+ /// normalization and hash computation. The SpecialProcID is used as a seed
+ /// component in the StringHash function.
+ /// </summary>
+ public static class SpecialProcDetector
+ {
+     // Well-known system procedures mapped to their SpecialProcID.
+     // NOTE(review): the generic type arguments were reconstructed from usage;
+     // the patch text had angle-bracketed spans stripped.
+     private static readonly Dictionary<string, byte> SpecialProcs =
+         new Dictionary<string, byte>(StringComparer.OrdinalIgnoreCase)
+         {
+             { "sp_prepare", 1 },
+             { "sp_execute", 2 },
+             { "sp_executesql", 3 },
+             { "sp_prepexec", 4 },
+             { "sp_cursoropen", 5 },
+             { "sp_cursorclose", 6 },
+             { "sp_cursorfetch", 7 },
+             { "sp_cursorexecute", 8 },
+             { "sp_cursorprepare", 9 },
+             { "sp_cursorprepexec", 10 },
+             { "sp_cursorunprepare", 11 },
+             { "sp_cursor", 12 },
+             { "sp_unprepare", 13 },
+             { "sp_getschemalock", 14 },
+             { "sp_releaseschemalock", 15 },
+             { "sp_reset_connection", 16 },
+             { "sp_refreshview", 17 },
+         };
+
+     /// <summary>
+     /// Returns the SpecialProcID for a stored procedure name, or 0 if not special.
+     /// Handles fully qualified names (e.g., "master.dbo.sp_executesql") and,
+     /// as a robustness fix, bracket-quoted parts (e.g., "[dbo].[sp_executesql]"),
+     /// which previously failed the lookup and returned 0.
+     /// </summary>
+     public static byte GetSpecialProcId(string procName)
+     {
+         if (string.IsNullOrEmpty(procName))
+             return 0;
+
+         // Handle fully qualified names: extract the last part
+         string simpleName = procName;
+         int lastDot = procName.LastIndexOf('.');
+         if (lastDot >= 0 && lastDot < procName.Length - 1)
+             simpleName = procName.Substring(lastDot + 1);
+
+         // Strip bracket quoting so "[sp_executesql]" still matches.
+         simpleName = simpleName.Trim('[', ']');
+
+         if (SpecialProcs.TryGetValue(simpleName, out byte id))
+             return id;
+
+         return 0;
+     }
+
+     /// <summary>
+     /// Checks if an RPC event's object name is a special procedure.
+     /// </summary>
+     public static bool IsSpecialProc(string procName)
+     {
+         return GetSpecialProcId(procName) != 0;
+     }
+ }
diff --git a/TraceEventImporter/Processing/UniqueStore.cs b/TraceEventImporter/Processing/UniqueStore.cs
new file mode 100644
index 00000000..9a2705d9
--- /dev/null
+++ b/TraceEventImporter/Processing/UniqueStore.cs
@@ -0,0 +1,162 @@
+using System;
+using System.Collections.Generic;
+
+namespace TraceEventImporter.Processing
+{
+ /// <summary>
+ /// In-memory deduplication store for unique batches, statements, app names, and login names.
+ /// First occurrence stores original + normalized text; subsequent occurrences reuse the HashID.
+ /// </summary>
+ public class UniqueStore
+ {
+     // NOTE(review): all generic type arguments below were reconstructed from
+     // usage — the patch text had angle-bracketed spans stripped.
+     private readonly Dictionary<long, UniqueBatch> _uniqueBatches = new Dictionary<long, UniqueBatch>();
+     private readonly Dictionary<long, UniqueStatement> _uniqueStatements = new Dictionary<long, UniqueStatement>();
+     private readonly Dictionary<string, int> _uniqueAppNames = new Dictionary<string, int>(StringComparer.OrdinalIgnoreCase);
+     private readonly Dictionary<string, int> _uniqueLoginNames = new Dictionary<string, int>(StringComparer.OrdinalIgnoreCase);
+     private readonly Dictionary<string, ProcedureInfo> _procedureNames = new Dictionary<string, ProcedureInfo>(StringComparer.OrdinalIgnoreCase);
+     private readonly HashSet<int> _tracedEventIds = new HashSet<int>();
+
+     private long _uniqueBatchSeq;
+     private long _uniqueStmtSeq;
+     private int _appNameIdSeq;
+     private int _loginNameIdSeq;
+
+     #region Unique Batches
+
+     /// <summary>
+     /// Registers the first occurrence of a batch HashID. Returns true when the
+     /// batch was added, false when the HashID was already known (texts are kept
+     /// from the first occurrence only).
+     /// </summary>
+     public bool TryAddBatch(long hashId, string origText, string normText, byte specialProcId)
+     {
+         if (_uniqueBatches.ContainsKey(hashId))
+             return false;
+
+         _uniqueBatches[hashId] = new UniqueBatch
+         {
+             Seq = ++_uniqueBatchSeq,
+             HashID = hashId,
+             OrigText = origText ?? "",
+             NormText = normText ?? "",
+             SpecialProcID = specialProcId
+         };
+         return true;
+     }
+
+     public IEnumerable<UniqueBatch> GetUniqueBatches() => _uniqueBatches.Values;
+
+     #endregion
+
+     #region Unique Statements
+
+     /// <summary>
+     /// Registers the first occurrence of a statement HashID. Returns true when
+     /// added, false when already known. Fix: texts are now null-coalesced to
+     /// "" for consistency with TryAddBatch (previously nulls were stored).
+     /// </summary>
+     public bool TryAddStatement(long hashId, string origText, string normText)
+     {
+         if (_uniqueStatements.ContainsKey(hashId))
+             return false;
+
+         _uniqueStatements[hashId] = new UniqueStatement
+         {
+             Seq = ++_uniqueStmtSeq,
+             HashID = hashId,
+             OrigText = origText ?? "",
+             NormText = normText ?? ""
+         };
+         return true;
+     }
+
+     public IEnumerable<UniqueStatement> GetUniqueStatements() => _uniqueStatements.Values;
+
+     #endregion
+
+     #region App Names
+
+     /// <summary>
+     /// Returns the stable id for an application name (case-insensitive),
+     /// allocating the next id on first sight. Null maps to "".
+     /// </summary>
+     public int GetOrAddAppName(string appName)
+     {
+         if (string.IsNullOrEmpty(appName))
+             appName = "";
+
+         if (_uniqueAppNames.TryGetValue(appName, out int id))
+             return id;
+
+         id = ++_appNameIdSeq;
+         _uniqueAppNames[appName] = id;
+         return id;
+     }
+
+     public IEnumerable<KeyValuePair<string, int>> GetUniqueAppNames() => _uniqueAppNames;
+
+     #endregion
+
+     #region Login Names
+
+     /// <summary>
+     /// Returns the stable id for a login name (case-insensitive),
+     /// allocating the next id on first sight. Null maps to "".
+     /// </summary>
+     public int GetOrAddLoginName(string loginName)
+     {
+         if (string.IsNullOrEmpty(loginName))
+             loginName = "";
+
+         if (_uniqueLoginNames.TryGetValue(loginName, out int id))
+             return id;
+
+         id = ++_loginNameIdSeq;
+         _uniqueLoginNames[loginName] = id;
+         return id;
+     }
+
+     public IEnumerable<KeyValuePair<string, int>> GetUniqueLoginNames() => _uniqueLoginNames;
+
+     #endregion
+
+     #region Procedure Names
+
+     /// <summary>
+     /// Records a procedure name keyed by (DBID, ObjectID, SpecialProcID);
+     /// the first name seen for a key wins.
+     /// </summary>
+     public void AddProcedureName(int dbid, int objectId, byte specialProcId, string name)
+     {
+         string key = $"{dbid}_{objectId}_{specialProcId}";
+         if (!_procedureNames.ContainsKey(key))
+         {
+             _procedureNames[key] = new ProcedureInfo
+             {
+                 DBID = dbid,
+                 ObjectID = objectId,
+                 SpecialProcID = specialProcId,
+                 Name = name
+             };
+         }
+     }
+
+     public IEnumerable<ProcedureInfo> GetProcedureNames() => _procedureNames.Values;
+
+     #endregion
+
+     #region Traced Events
+
+     /// <summary>
+     /// Records that an event id was observed in the trace (set semantics).
+     /// </summary>
+     public void AddTracedEvent(int eventId)
+     {
+         _tracedEventIds.Add(eventId);
+     }
+
+     public IEnumerable<int> GetTracedEventIds() => _tracedEventIds;
+
+     #endregion
+ }
+
+ /// <summary>
+ /// First-occurrence record of a unique batch: insertion sequence, HashID,
+ /// original and normalized text, and the special-proc id used as hash seed.
+ /// </summary>
+ public class UniqueBatch
+ {
+ public long Seq { get; set; }
+ public long HashID { get; set; }
+ public string OrigText { get; set; }
+ public string NormText { get; set; }
+ public byte SpecialProcID { get; set; }
+ }
+
+ /// <summary>
+ /// First-occurrence record of a unique statement: insertion sequence,
+ /// HashID, and original/normalized text.
+ /// </summary>
+ public class UniqueStatement
+ {
+ public long Seq { get; set; }
+ public long HashID { get; set; }
+ public string OrigText { get; set; }
+ public string NormText { get; set; }
+ }
+
+ /// <summary>
+ /// Procedure name record keyed (in UniqueStore) by DBID + ObjectID +
+ /// SpecialProcID; first name seen for a key wins.
+ /// </summary>
+ public class ProcedureInfo
+ {
+ public int DBID { get; set; }
+ public int ObjectID { get; set; }
+ public byte SpecialProcID { get; set; }
+ public string Name { get; set; }
+ }
+}
diff --git a/TraceEventImporter/Readers/ITraceEventReader.cs b/TraceEventImporter/Readers/ITraceEventReader.cs
new file mode 100644
index 00000000..a59dc0f5
--- /dev/null
+++ b/TraceEventImporter/Readers/ITraceEventReader.cs
@@ -0,0 +1,24 @@
+using System.Collections.Generic;
+using TraceEventImporter.Models;
+
+namespace TraceEventImporter.Readers
+{
+ /// <summary>
+ /// Abstraction for reading trace events from different file formats (.trc, .xel).
+ /// Implementations yield events in file order with monotonically increasing Seq numbers.
+ /// NOTE(review): the XML-doc tags and the IEnumerable element type were
+ /// reconstructed; the patch text had angle-bracketed spans stripped.
+ /// </summary>
+ public interface ITraceEventReader
+ {
+     /// <summary>
+     /// Read all trace events from the specified file.
+     /// </summary>
+     /// <param name="filePath">Path to the trace file</param>
+     /// <returns>Enumerable of trace events in file order</returns>
+     IEnumerable<TraceEvent> ReadEvents(string filePath);
+
+     /// <summary>
+     /// File extensions supported by this reader (e.g., ".trc", ".xel").
+     /// </summary>
+     string[] SupportedExtensions { get; }
+ }
+}
diff --git a/TraceEventImporter/Readers/TrcFileReader.cs b/TraceEventImporter/Readers/TrcFileReader.cs
new file mode 100644
index 00000000..5a264438
--- /dev/null
+++ b/TraceEventImporter/Readers/TrcFileReader.cs
@@ -0,0 +1,470 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using TraceEventImporter.Models;
+
+namespace TraceEventImporter.Readers
+{
+ ///
+ /// Reads SQL Server Profiler trace (.trc) files in binary format.
+ /// Implements a simplified binary parser based on the Yukon+ .trc file format
+ /// documented in READ80TRACE/trccommon.h and 80TraceIO.cpp.
+ /// No SMO or SQL Server dependency required.
+ ///
+ public class TrcFileReader : ITraceEventReader
+ {
// TRACE_SPECIAL_COLUMNS — sentinel markers (>= 0xFFF2) interleaved with data
// columns in the .trc stream; everything below 0xFFF2 in this range family is
// a record boundary or trace-metadata payload, not event data.
private const ushort TRACE_BEGIN_RECORD = 0xFFF6;
private const ushort TRACE_FLUSH = 0xFFF2;
private const ushort SQL_TRACE_VERSION = 0xFFFE;
private const ushort TRACED_OPTIONS = 0xFFFD;
private const ushort TRACED_EVENTS = 0xFFFC;
private const ushort TRACED_FILTERS = 0xFFFB;
private const ushort TRACE_START = 0xFFFA;
private const ushort TRACE_STOP = 0xFFF9;
private const ushort TRACE_TEXT_FILTERED = 0xFFF5;

// Column IDs (from trccommon.h TRACE_DATA_COLUMNS) — only the columns this
// importer maps are named; other ids appear as bare numbers in FixedColumnSizes.
private const int COL_TEXT = 1;
private const int COL_BINARYDATA = 2;
private const int COL_DBID = 3;
private const int COL_LINENO = 5;
private const int COL_NTUSERNAME = 6;
private const int COL_NTDOMAINNAME = 7;
private const int COL_HOSTNAME = 8;
private const int COL_APPNAME = 10;
private const int COL_LOGINNAME = 11;
private const int COL_SESSION = 12;
private const int COL_DURATION = 13;
private const int COL_STARTTIME = 14;
private const int COL_ENDTIME = 15;
private const int COL_READS = 16;
private const int COL_WRITES = 17;
private const int COL_CPU = 18;
private const int COL_SEVERITY = 20;
private const int COL_SUBCLASS = 21;
private const int COL_OBJID = 22;
private const int COL_INTDATA = 25;
private const int COL_CLASS = 27;
private const int COL_NESTLEVEL = 29;
private const int COL_STATE = 30;
private const int COL_ERROR = 31;
private const int COL_OBJNAME = 34;
private const int COL_ROWCOUNTS = 48;
private const int COL_REQUESTID = 49;
private const int COL_EVENTSEQ = 51;
private const int COL_OFFSET = 61;

// Max columns in trace format; column ids above this are treated as noise
// during resync.
private const int TOTAL_COLUMNS = 76;
+
// Fixed-size column sizes (bytes). Columns not listed are variable-length
// (length-prefixed). Keys are TRACE_DATA_COLUMNS ids; bare numeric keys are
// columns the importer must skip past but does not map onto TraceEvent.
// Generic type arguments restored: the patch-extraction process stripped
// <int, int> (a non-generic Dictionary with this initializer does not compile).
private static readonly Dictionary<int, int> FixedColumnSizes = new Dictionary<int, int>
{
    { COL_DBID, 4 }, { 4, 4 }, { COL_LINENO, 4 }, { 9, 4 },
    { COL_SESSION, 4 }, { COL_DURATION, 8 }, { COL_STARTTIME, 8 }, { COL_ENDTIME, 8 },
    { COL_READS, 8 }, { COL_WRITES, 8 }, { COL_CPU, 8 }, { 19, 4 },
    { COL_SEVERITY, 4 }, { COL_SUBCLASS, 4 }, { COL_OBJID, 4 }, { 23, 4 },
    { 24, 4 }, { COL_INTDATA, 4 }, { COL_CLASS, 2 }, { 28, 4 },
    { COL_NESTLEVEL, 4 }, { COL_STATE, 4 }, { COL_ERROR, 4 }, { 32, 4 },
    { 33, 4 }, { COL_ROWCOUNTS, 8 }, { COL_REQUESTID, 4 }, { 50, 8 },
    { COL_EVENTSEQ, 8 }, { 52, 8 }, { 53, 8 }, { 54, 16 },
    { 55, 4 }, { 56, 4 }, { 57, 4 }, { 58, 4 },
    { 60, 4 }, { COL_OFFSET, 4 }, { 62, 4 }, { 66, 4 },
    { 68, 16 },
};
+
// Fallback sequence counter used for events whose EventSequence column is absent.
private long _globalSeq;

/// <summary>
/// Creates a reader whose fallback Seq numbering continues after
/// <paramref name="startingSeq"/> (useful when loading multiple rollover files).
/// </summary>
public TrcFileReader(long startingSeq = 0)
{
    _globalSeq = startingSeq;
}
+
/// <summary>This reader handles SQL Profiler binary trace files only.</summary>
public string[] SupportedExtensions => new[] { ".trc" };
+
/// <summary>
/// Enumerates all events in a .trc file. Malformed events are skipped and the
/// parser attempts to resync on the next record marker; enumeration stops at
/// end of stream or if the file header is not a valid trace header.
/// </summary>
/// <param name="filePath">Path to the .trc file.</param>
/// <returns>
/// Events in file order. Seq is taken from the EventSequence column when
/// present, otherwise assigned from the running fallback counter.
/// </returns>
// Generic type argument restored: the method yields TraceEvent instances and
// the patch-extraction process stripped the <TraceEvent> argument.
public IEnumerable<TraceEvent> ReadEvents(string filePath)
{
    using (var stream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.Read, 65536))
    using (var reader = new BinaryReader(stream, Encoding.Unicode))
    {
        // Read file header; null means "not a trace file" (bad BOM tag) or truncated.
        var header = ReadFileHeader(reader);
        if (header == null)
            yield break;

        // Yukon (SQL 2005+) records carry an explicit record length.
        bool isYukon = header.MajorVersion >= 9;

        // Need at least one 2-byte marker remaining to keep reading.
        while (stream.Position < stream.Length - 2)
        {
            TraceEvent evt = null;
            try
            {
                evt = ReadNextEvent(reader, stream, isYukon);
            }
            catch (EndOfStreamException)
            {
                break;
            }
            catch (Exception)
            {
                // Skip malformed events; ReadNextEvent has consumed at least
                // one marker, so the loop makes forward progress.
                continue;
            }

            if (evt != null)
            {
                if (evt.Seq == 0)
                    evt.Seq = ++_globalSeq;
                yield return evt;
            }
        }
    }
}
+
/// <summary>
/// Parses the fixed-layout .trc file header. Returns null when the file does
/// not start with the UTF-16 BOM tag (0xFEFF) or the header is truncated,
/// signalling the caller to abort this file.
/// </summary>
private TrcHeader ReadFileHeader(BinaryReader reader)
{
    try
    {
        var header = new TrcHeader();
        header.UnicodeTag = reader.ReadUInt16();
        if (header.UnicodeTag != 0xFEFF)
            return null; // not a trace file

        header.HeaderSize = reader.ReadUInt16();
        header.TraceVersion = reader.ReadUInt16();

        // Provider name: WCHAR[128] = 256 bytes, NUL-padded
        byte[] providerBytes = reader.ReadBytes(256);
        header.ProviderName = Encoding.Unicode.GetString(providerBytes).TrimEnd('\0');

        // Definition type: WCHAR[64] = 128 bytes — not needed, skipped
        reader.ReadBytes(128);

        header.MajorVersion = reader.ReadByte();
        header.MinorVersion = reader.ReadByte();
        header.BuildNumber = reader.ReadUInt16();
        header.HeaderOptions = reader.ReadUInt32();

        // Server name: WCHAR[128] = 256 bytes, NUL-padded
        byte[] serverBytes = reader.ReadBytes(256);
        header.ServerName = Encoding.Unicode.GetString(serverBytes).TrimEnd('\0');

        // RepeatBase
        header.RepeatBase = reader.ReadUInt16();

        return header;
    }
    catch
    {
        // Truncated/corrupt header — treated the same as an invalid file.
        return null;
    }
}
+
/// <summary>
/// Reads one event record: scans forward to the next TRACE_BEGIN_RECORD
/// marker (skipping trace-metadata markers and stray column data), then reads
/// the record header and all column values up to the record boundary.
/// Returns null at end of stream.
/// </summary>
/// <param name="isYukon">True for SQL 2005+ traces, whose record header carries an explicit length.</param>
private TraceEvent ReadNextEvent(BinaryReader reader, Stream stream, bool isYukon)
{
    // Find TRACE_BEGIN_RECORD marker
    while (true)
    {
        if (stream.Position >= stream.Length - 2)
            return null;

        ushort marker = reader.ReadUInt16();

        if (marker == 0) // EOF
            return null;

        // Handle special columns (skip them)
        // NOTE(review): TRACE_TEXT_FILTERED (0xFFF5) falls in this range but
        // SkipSpecialColumn has no case for it, so its payload is not
        // consumed here — confirm that is intended (it IS handled when it
        // appears inside a record, below).
        if (marker >= TRACE_FLUSH && marker <= SQL_TRACE_VERSION && marker != TRACE_BEGIN_RECORD)
        {
            SkipSpecialColumn(reader, marker);
            continue;
        }

        if (marker == TRACE_BEGIN_RECORD)
            break;

        // Unexpected data — try to resync
        if (marker < 1 || marker > TOTAL_COLUMNS)
            continue;

        // It's a column ID outside of a record context — skip
        SkipColumnData(reader, marker);
    }

    // Read TRACE_BEGIN_RECORD data
    ushort eventId;
    int recordLength;

    if (isYukon)
    {
        // TrcBeginRecordData: USHORT eventId + LONG recordLength
        eventId = reader.ReadUInt16();
        recordLength = reader.ReadInt32();
    }
    else
    {
        // Shiloh: SHORT eventClass + SHORT colCount packed in 4 bytes
        int packed = reader.ReadInt32();
        eventId = (ushort)(packed & 0xFFFF);
        recordLength = -1; // Read until next TRACE_BEGIN_RECORD
    }

    var evt = new TraceEvent();
    evt.EventId = eventId;
    evt.EventType = MapEventClass(eventId);

    // Read columns within the record
    long recordStart = stream.Position;
    long recordEnd = recordLength > 0 ? recordStart + recordLength - 6 : stream.Length; // -6 for begin record header

    while (stream.Position < recordEnd - 1)
    {
        if (stream.Position >= stream.Length - 2)
            break;

        ushort colId = reader.ReadUInt16();

        if (colId == TRACE_BEGIN_RECORD || colId == 0)
        {
            // We've hit the next event or EOF — back up and return
            stream.Position -= 2;
            break;
        }

        // Skip special markers within events
        if (colId >= TRACE_FLUSH && colId <= SQL_TRACE_VERSION)
        {
            break;
        }

        // Filtered text is stored under a sentinel id but decodes like TextData.
        if (colId == TRACE_TEXT_FILTERED)
            colId = COL_TEXT;

        try
        {
            ReadAndApplyColumn(reader, evt, colId);
        }
        catch
        {
            // Bail out of this record; the outer loop resyncs on the next marker.
            break;
        }
    }

    return evt;
}
+
/// <summary>
/// Reads one column's payload and applies it to the event. Fixed-width
/// columns have a known byte size; all others are length-prefixed
/// (1-byte length, or 0xFF followed by a 4-byte length).
/// </summary>
private void ReadAndApplyColumn(BinaryReader reader, TraceEvent evt, int colId)
{
    if (FixedColumnSizes.TryGetValue(colId, out int fixedSize))
    {
        byte[] data = reader.ReadBytes(fixedSize);
        if (data.Length < fixedSize) return; // truncated at EOF — drop silently
        ApplyFixedColumn(evt, colId, data);
    }
    else
    {
        // Variable-length: read length prefix
        byte firstByte = reader.ReadByte();
        int dataLen;
        if (firstByte == 0xFF)
            dataLen = reader.ReadInt32();
        else
            dataLen = firstByte;

        if (dataLen <= 0 || dataLen > 10 * 1024 * 1024) // Sanity: max 10MB per column
            return;

        byte[] data = reader.ReadBytes(dataLen);
        if (data.Length < dataLen) return;
        ApplyVariableColumn(evt, colId, data);
    }
}
+
/// <summary>
/// Applies a fixed-width column value onto the event. Assumes little-endian
/// byte order in the file, decoded via BitConverter (matches the x86/x64
/// hosts this tool runs on). Unmapped column ids are ignored.
/// </summary>
private void ApplyFixedColumn(TraceEvent evt, int colId, byte[] data)
{
    switch (colId)
    {
        case COL_DBID:
            evt.DatabaseId = BitConverter.ToInt32(data, 0);
            break;
        case COL_SESSION:
            evt.SessionId = BitConverter.ToInt32(data, 0);
            break;
        case COL_DURATION:
            evt.Duration = BitConverter.ToInt64(data, 0);
            break;
        case COL_STARTTIME:
            evt.StartTime = ReadSqlDateTime(data, 0);
            break;
        case COL_ENDTIME:
            evt.EndTime = ReadSqlDateTime(data, 0);
            break;
        case COL_READS:
            evt.Reads = BitConverter.ToInt64(data, 0);
            break;
        case COL_WRITES:
            evt.Writes = BitConverter.ToInt64(data, 0);
            break;
        case COL_CPU:
            evt.CPU = BitConverter.ToInt64(data, 0);
            break;
        case COL_OBJID:
            evt.ObjectId = BitConverter.ToInt32(data, 0);
            break;
        case COL_NESTLEVEL:
            evt.NestLevel = BitConverter.ToInt32(data, 0);
            break;
        case COL_ERROR:
            evt.Error = BitConverter.ToInt32(data, 0);
            break;
        case COL_SEVERITY:
            evt.Severity = BitConverter.ToInt32(data, 0);
            break;
        case COL_STATE:
            evt.State = BitConverter.ToInt32(data, 0);
            break;
        case COL_SUBCLASS:
            evt.EventSubclass = BitConverter.ToInt32(data, 0);
            break;
        case COL_INTDATA:
            evt.IntegerData = BitConverter.ToInt32(data, 0);
            break;
        case COL_ROWCOUNTS:
            evt.RowCount = BitConverter.ToInt64(data, 0);
            break;
        case COL_REQUESTID:
            evt.RequestId = BitConverter.ToInt32(data, 0);
            break;
        case COL_EVENTSEQ:
            // Seq from the file wins over the fallback counter in ReadEvents.
            evt.Seq = BitConverter.ToInt64(data, 0);
            break;
        case COL_OFFSET:
            evt.Offset = BitConverter.ToInt32(data, 0);
            break;
        case COL_LINENO:
            evt.LineNumber = BitConverter.ToInt32(data, 0);
            break;
    }
}
+
/// <summary>
/// Applies a variable-length column onto the event. Every column handled
/// here is a UTF-16 string, so the payload is decoded once (NUL padding
/// trimmed) and routed to the matching property. Unmapped ids are ignored.
/// </summary>
private void ApplyVariableColumn(TraceEvent evt, int colId, byte[] data)
{
    string text = Encoding.Unicode.GetString(data).TrimEnd('\0');
    switch (colId)
    {
        case COL_TEXT:
            evt.TextData = text;
            break;
        case COL_LOGINNAME:
            evt.LoginName = text;
            break;
        case COL_APPNAME:
            evt.ApplicationName = text;
            break;
        case COL_HOSTNAME:
            evt.HostName = text;
            break;
        case COL_NTUSERNAME:
            evt.NTUserName = text;
            break;
        case COL_NTDOMAINNAME:
            evt.NTDomainName = text;
            break;
        case COL_OBJNAME:
            evt.ObjectName = text;
            break;
    }
}
+
/// <summary>
/// Decodes an 8-byte SQL Server datetime: a 4-byte day count since
/// 1900-01-01 followed by a 4-byte count of 1/300ths of a second.
/// Returns null when the buffer is too short or the value is out of range.
/// </summary>
private static DateTime? ReadSqlDateTime(byte[] data, int offset)
{
    if (data.Length < offset + 8) return null;
    try
    {
        int dayCount = BitConverter.ToInt32(data, offset);
        uint thirdMilliseconds = BitConverter.ToUInt32(data, offset + 4);
        // 1/300 of a second == 10/3 milliseconds.
        double ms = thirdMilliseconds * 10.0 / 3.0;
        return new DateTime(1900, 1, 1).AddDays(dayCount).AddMilliseconds(ms);
    }
    catch
    {
        // ArgumentOutOfRange on absurd day counts — treat as unreadable.
        return null;
    }
}
+
/// <summary>
/// Consumes and discards the payload of a trace-metadata marker so the
/// stream stays aligned on the next marker. Payload lengths are sanity-capped
/// at 1MB; exceptions are swallowed deliberately — the caller's scan loop
/// will attempt to resync.
/// </summary>
private void SkipSpecialColumn(BinaryReader reader, ushort marker)
{
    // Special columns have variable data — read and discard
    try
    {
        switch (marker)
        {
            case SQL_TRACE_VERSION:
            case TRACED_OPTIONS:
            case TRACE_START:
            case TRACE_STOP:
                // These have a 4-byte length followed by data
                int len = reader.ReadInt32();
                if (len > 0 && len < 1024 * 1024)
                    reader.ReadBytes(len);
                break;
            case TRACED_EVENTS:
            case TRACED_FILTERS:
                // Variable-length: read length prefix
                byte fb = reader.ReadByte();
                int dlen = fb == 0xFF ? reader.ReadInt32() : fb;
                if (dlen > 0 && dlen < 1024 * 1024)
                    reader.ReadBytes(dlen);
                break;
            case TRACE_FLUSH:
                // No data
                break;
        }
    }
    catch { }
}
+
/// <summary>
/// Consumes and discards one column's payload encountered outside a record,
/// using the same fixed-size/length-prefixed rules as ReadAndApplyColumn.
/// Errors are swallowed — this is purely a resync aid.
/// </summary>
private void SkipColumnData(BinaryReader reader, int colId)
{
    try
    {
        if (FixedColumnSizes.TryGetValue(colId, out int size))
        {
            reader.ReadBytes(size);
        }
        else
        {
            // Length-prefixed: 1-byte length, or 0xFF + 4-byte length.
            byte fb = reader.ReadByte();
            int len = fb == 0xFF ? reader.ReadInt32() : fb;
            if (len > 0 && len < 10 * 1024 * 1024)
                reader.ReadBytes(len);
        }
    }
    catch { }
}
+
/// <summary>
/// Maps a raw trace event class id onto the TraceEventType enum, falling
/// back to Unknown for ids the importer does not model.
/// </summary>
private static TraceEventType MapEventClass(int eventId) =>
    Enum.IsDefined(typeof(TraceEventType), eventId)
        ? (TraceEventType)eventId
        : TraceEventType.Unknown;
+
// Raw fields of the .trc file header, in file order. Only MajorVersion is
// consulted by the parser (Yukon vs. Shiloh record layout); the rest are
// retained for diagnostics.
private class TrcHeader
{
    public ushort UnicodeTag;      // must be 0xFEFF (UTF-16 BOM)
    public ushort HeaderSize;
    public ushort TraceVersion;
    public string ProviderName;
    public byte MajorVersion;      // >= 9 selects the Yukon record format
    public byte MinorVersion;
    public ushort BuildNumber;
    public uint HeaderOptions;
    public string ServerName;
    public ushort RepeatBase;
}
+ }
+}
diff --git a/TraceEventImporter/Readers/XelFileReader.cs b/TraceEventImporter/Readers/XelFileReader.cs
new file mode 100644
index 00000000..dc543023
--- /dev/null
+++ b/TraceEventImporter/Readers/XelFileReader.cs
@@ -0,0 +1,270 @@
+using System;
+using System.Collections.Concurrent;
+using System.Collections.Generic;
+using System.Threading;
+using System.Threading.Tasks;
+using TraceEventImporter.Models;
+using Microsoft.SqlServer.XEvent.XELite;
+
+namespace TraceEventImporter.Readers
+{
+ ///
+ /// Reads SQL Server Extended Events (.xel) files using Microsoft.SqlServer.XEvent.XELite.
+ /// Maps XE event fields and actions to the unified TraceEvent model based on
+ /// the field mappings defined in xereaderlib/XEtoTrcConversions.h.
+ ///
+ public class XelFileReader : ITraceEventReader
+ {
// Fallback sequence counter used when an event carries no event_sequence action.
private long _globalSeq;

/// <summary>
/// Creates a reader whose fallback Seq numbering continues after
/// <paramref name="startingSeq"/> (useful when loading multiple rollover files).
/// </summary>
public XelFileReader(long startingSeq = 0)
{
    _globalSeq = startingSeq;
}
+
/// <summary>This reader handles Extended Events files only.</summary>
public string[] SupportedExtensions => new[] { ".xel" };
+
/// <summary>
/// Enumerates all events in a .xel file. XELite exposes an async callback
/// API, so a background task pushes mapped events into a bounded
/// BlockingCollection which this method drains synchronously; any reader
/// exception is rethrown after the collection is drained.
/// </summary>
/// <param name="filePath">Path to the .xel file.</param>
/// <returns>Events in file order.</returns>
// Generic type arguments restored: the patch-extraction process stripped
// <TraceEvent> from both the return type and BlockingCollection (the
// non-generic forms do not compile).
public IEnumerable<TraceEvent> ReadEvents(string filePath)
{
    var collection = new BlockingCollection<TraceEvent>(boundedCapacity: 1000);

    Task readerTask = Task.Run(() =>
    {
        try
        {
            var streamer = new XEFileEventStreamer(filePath);
            streamer.ReadEventStream(
                () => Task.CompletedTask,
                xevent =>
                {
                    TraceEvent evt = MapEvent(xevent);
                    if (evt != null)
                    {
                        if (evt.Seq == 0)
                            evt.Seq = Interlocked.Increment(ref _globalSeq);
                        collection.Add(evt);
                    }
                    return Task.CompletedTask;
                },
                CancellationToken.None).Wait();
        }
        finally
        {
            // Always unblock the consumer, even when the reader faults.
            collection.CompleteAdding();
        }
    });

    foreach (TraceEvent evt in collection.GetConsumingEnumerable())
    {
        yield return evt;
    }

    // Propagate any exception from the reader task
    readerTask.GetAwaiter().GetResult();
    // NOTE(review): if the caller abandons enumeration early, the producer can
    // stay blocked on Add() against the bounded collection — confirm callers
    // always drain fully (the importer does).
}
+
/// <summary>
/// Maps an XELite event onto the unified TraceEvent model: event-specific
/// fields are applied per event name, then the common actions (session,
/// database, client names, sql_text fallback) are layered on.
/// Unrecognized event names map to Unknown.
/// </summary>
private TraceEvent MapEvent(IXEvent xe)
{
    var evt = new TraceEvent();

    // BUG FIX: assign the timestamp BEFORE the event-specific mapping. The
    // completed-event mappers call MapPerformanceFields, which derives
    // EndTime from StartTime + Duration and skips the computation while
    // StartTime is still null — with the assignment after the switch (as
    // before), EndTime was never populated for any completed event.
    evt.StartTime = xe.Timestamp.UtcDateTime;

    // Map event name to TraceEventType
    switch (xe.Name.ToLowerInvariant())
    {
        case "sql_batch_completed":
            evt.EventType = TraceEventType.SqlBatchCompleted;
            evt.EventId = 12;
            MapBatchCompleted(xe, evt);
            break;
        case "sql_batch_starting":
            evt.EventType = TraceEventType.SqlBatchStarting;
            evt.EventId = 13;
            MapBatchStarting(xe, evt);
            break;
        case "rpc_completed":
            evt.EventType = TraceEventType.RpcCompleted;
            evt.EventId = 10;
            MapRpcCompleted(xe, evt);
            break;
        case "rpc_starting":
            evt.EventType = TraceEventType.RpcStarting;
            evt.EventId = 11;
            MapRpcStarting(xe, evt);
            break;
        case "sp_statement_completed":
            evt.EventType = TraceEventType.SpStmtCompleted;
            evt.EventId = 45;
            MapSpStmtCompleted(xe, evt);
            break;
        case "sp_statement_starting":
            evt.EventType = TraceEventType.SpStmtStarting;
            evt.EventId = 44;
            MapSpStmtStarting(xe, evt);
            break;
        case "login":
            evt.EventType = TraceEventType.AuditLogin;
            evt.EventId = 14;
            break;
        case "logout":
            evt.EventType = TraceEventType.AuditLogout;
            evt.EventId = 15;
            break;
        case "attention":
            evt.EventType = TraceEventType.Attention;
            evt.EventId = 16;
            break;
        case "sql_statement_recompile":
            evt.EventType = TraceEventType.StmtRecompile;
            evt.EventId = 166;
            break;
        default:
            evt.EventType = TraceEventType.Unknown;
            break;
    }

    // Common actions (global fields attached to all events). GetActionInt64
    // returns 0 when the action is absent, so Seq stays 0 and the caller's
    // fallback counter kicks in.
    evt.Seq = GetActionInt64(xe, "event_sequence");
    evt.DatabaseId = (int)GetActionInt64(xe, "database_id");
    evt.SessionId = (int)GetActionInt64(xe, "session_id");
    evt.RequestId = (int)GetActionInt64(xe, "request_id");
    evt.ApplicationName = GetActionString(xe, "client_app_name");
    evt.LoginName = GetActionString(xe, "server_principal_name");
    evt.HostName = GetActionString(xe, "client_hostname");
    evt.NTUserName = GetActionString(xe, "nt_username");

    // If no text from event-specific mapping, try the sql_text action
    if (string.IsNullOrEmpty(evt.TextData))
        evt.TextData = GetActionString(xe, "sql_text");

    return evt;
}
+
// sql_batch_completed: batch_text carries the T-SQL; perf counters come from
// the completed payload and the "result" field is stored in Error.
private void MapBatchCompleted(IXEvent xe, TraceEvent evt)
{
    evt.TextData = GetFieldString(xe, "batch_text");
    MapPerformanceFields(xe, evt);
    evt.Error = (int?)GetFieldInt64Nullable(xe, "result");
}
+
// sql_batch_starting: only the batch text is available at start time.
private void MapBatchStarting(IXEvent xe, TraceEvent evt)
{
    evt.TextData = GetFieldString(xe, "batch_text");
}
+
// rpc_completed: "statement" holds the invocation text, "object_name" the
// procedure; perf counters and "result" (stored in Error) from the payload.
private void MapRpcCompleted(IXEvent xe, TraceEvent evt)
{
    evt.TextData = GetFieldString(xe, "statement");
    evt.ObjectName = GetFieldString(xe, "object_name");
    MapPerformanceFields(xe, evt);
    evt.Error = (int?)GetFieldInt64Nullable(xe, "result");
}
+
// rpc_starting: invocation text and procedure name only.
private void MapRpcStarting(IXEvent xe, TraceEvent evt)
{
    evt.TextData = GetFieldString(xe, "statement");
    evt.ObjectName = GetFieldString(xe, "object_name");
}
+
// sp_statement_completed: statement text plus position metadata (object,
// nest level, line, offset) and perf counters.
private void MapSpStmtCompleted(IXEvent xe, TraceEvent evt)
{
    evt.TextData = GetFieldString(xe, "statement");
    evt.ObjectId = (int?)GetFieldInt64Nullable(xe, "object_id");
    evt.ObjectName = GetFieldString(xe, "object_name");
    evt.NestLevel = (int?)GetFieldInt64Nullable(xe, "nest_level");
    evt.LineNumber = (int?)GetFieldInt64Nullable(xe, "line_number");
    evt.Offset = (int?)GetFieldInt64Nullable(xe, "offset");
    MapPerformanceFields(xe, evt);

    // source_database_id overrides the action-level database_id for statements
    long? srcDbId = GetFieldInt64Nullable(xe, "source_database_id");
    if (srcDbId.HasValue && srcDbId.Value > 0)
        evt.DatabaseId = (int)srcDbId.Value;
    // NOTE(review): MapEvent assigns DatabaseId from the action AFTER this
    // runs, which overwrites the source_database_id override — confirm the
    // intended precedence.
}
+
// sp_statement_starting: statement text and position metadata; no perf
// counters exist at start time.
private void MapSpStmtStarting(IXEvent xe, TraceEvent evt)
{
    evt.TextData = GetFieldString(xe, "statement");
    evt.ObjectId = (int?)GetFieldInt64Nullable(xe, "object_id");
    evt.ObjectName = GetFieldString(xe, "object_name");
    evt.NestLevel = (int?)GetFieldInt64Nullable(xe, "nest_level");
}
+
// Copies the common performance counters from a completed-event payload.
// Precondition: evt.StartTime must already be set for EndTime to be derived.
private void MapPerformanceFields(IXEvent xe, TraceEvent evt)
{
    evt.CPU = GetFieldInt64Nullable(xe, "cpu_time");
    evt.Duration = GetFieldInt64Nullable(xe, "duration");
    evt.Writes = GetFieldInt64Nullable(xe, "writes");
    evt.RowCount = GetFieldInt64Nullable(xe, "row_count");

    // Reads = logical_reads + physical_reads (matching XEtoTrcConversions.h Add behavior)
    long? logicalReads = GetFieldInt64Nullable(xe, "logical_reads");
    long? physicalReads = GetFieldInt64Nullable(xe, "physical_reads");
    if (logicalReads.HasValue || physicalReads.HasValue)
        evt.Reads = (logicalReads ?? 0) + (physicalReads ?? 0);

    // EndTime = StartTime + Duration for completed events
    // (XE durations are in microseconds)
    if (evt.StartTime.HasValue && evt.Duration.HasValue)
        evt.EndTime = evt.StartTime.Value.AddMicroseconds(evt.Duration.Value);
}
+
+ #region Field/Action Helpers
+
// Safely reads a string-valued event field; returns null when the field is
// absent or the payload is malformed (XELite can throw on corrupt events).
private static string GetFieldString(IXEvent xe, string fieldName)
{
    try
    {
        if (xe.Fields != null && xe.Fields.TryGetValue(fieldName, out object val) && val != null)
            return val.ToString();
    }
    catch { }
    return null;
}
+
// Safely reads an integer-valued event field; null when absent or not
// convertible, so callers can distinguish "missing" from zero.
private static long? GetFieldInt64Nullable(IXEvent xe, string fieldName)
{
    try
    {
        if (xe.Fields != null && xe.Fields.TryGetValue(fieldName, out object val) && val != null)
            return Convert.ToInt64(val);
    }
    catch { }
    return null;
}
+
// Safely reads a string-valued action (global field); null when absent.
private static string GetActionString(IXEvent xe, string actionName)
{
    try
    {
        if (xe.Actions != null && xe.Actions.TryGetValue(actionName, out object val) && val != null)
            return val.ToString();
    }
    catch { }
    return null;
}
+
// Safely reads an integer-valued action. Returns 0 (not null) when absent —
// callers such as MapEvent treat 0 as "unset" (e.g. to trigger fallback Seq).
private static long GetActionInt64(IXEvent xe, string actionName)
{
    try
    {
        if (xe.Actions != null && xe.Actions.TryGetValue(actionName, out object val) && val != null)
            return Convert.ToInt64(val);
    }
    catch { }
    return 0;
}
+
+ #endregion
+ }
+
// Extension providing DateTime.AddMicroseconds for .NET Framework 4.8, where
// the built-in overload (added in .NET 7) is unavailable.
internal static class DateTimeExtensions
{
    /// <summary>Adds the given number of microseconds (1 microsecond == 10 ticks).</summary>
    public static DateTime AddMicroseconds(this DateTime dt, long microseconds)
        => dt.AddTicks(microseconds * 10L);
}
diff --git a/TraceEventImporter/Schema/CreateSchema.sql b/TraceEventImporter/Schema/CreateSchema.sql
new file mode 100644
index 00000000..3a4a69bd
--- /dev/null
+++ b/TraceEventImporter/Schema/CreateSchema.sql
@@ -0,0 +1,425 @@
-- CreateSchema.sql
-- Creates the ReadTrace schema and all tables.
-- Ported verbatim from SRC/READ80TRACE/res/tsql/*.sql
-- NOTE: every table below is dropped and recreated, so re-running this script
-- discards previously imported data; the schema itself is only created when
-- missing.

IF NOT EXISTS (SELECT 1 FROM sys.schemas WHERE name = 'ReadTrace')
    EXEC('CREATE SCHEMA ReadTrace')
GO
+
+-- ===========================================
+-- Reference / Metadata Tables
+-- ===========================================
+
+IF OBJECT_ID('ReadTrace.tblMiscInfo', 'U') IS NOT NULL DROP TABLE ReadTrace.tblMiscInfo
+GO
+CREATE TABLE ReadTrace.tblMiscInfo
+(
+ Attribute nvarchar(50) NOT NULL,
+ Value nvarchar(2000) NULL,
+ iRow int NOT NULL IDENTITY(1,1) PRIMARY KEY CLUSTERED
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblTraceFiles', 'U') IS NOT NULL DROP TABLE ReadTrace.tblTraceFiles
+GO
+CREATE TABLE ReadTrace.tblTraceFiles
+(
+ FileProcessed int IDENTITY(1, 1) NOT NULL,
+ FirstSeqNumber bigint NULL,
+ LastSeqNumber bigint NULL,
+ FirstEventTime datetime NULL,
+ LastEventTime datetime NULL,
+ EventsRead bigint NOT NULL,
+ TraceFileName nvarchar(512) NOT NULL,
+ CONSTRAINT PK_tblTraceFiles PRIMARY KEY (FileProcessed)
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblTracedEvents', 'U') IS NOT NULL DROP TABLE ReadTrace.tblTracedEvents
+GO
+CREATE TABLE ReadTrace.tblTracedEvents
+(
+ EventID smallint NOT NULL CONSTRAINT PK_TracedEvents PRIMARY KEY
+)
+GO
+
+IF OBJECT_ID('ReadTrace.trace_events', 'U') IS NOT NULL DROP TABLE ReadTrace.trace_events
+GO
+CREATE TABLE ReadTrace.trace_events
+(
+ trace_event_id int NOT NULL PRIMARY KEY CLUSTERED,
+ category_id int NOT NULL,
+ name nvarchar(128) NOT NULL
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblUniqueAppNames', 'U') IS NOT NULL DROP TABLE ReadTrace.tblUniqueAppNames
+GO
+CREATE TABLE ReadTrace.tblUniqueAppNames
+(
+ [iID] [int] NOT NULL PRIMARY KEY IDENTITY(1,1),
+ [AppName] nvarchar(256) NOT NULL
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblUniqueLoginNames', 'U') IS NOT NULL DROP TABLE ReadTrace.tblUniqueLoginNames
+GO
+CREATE TABLE ReadTrace.tblUniqueLoginNames
+(
+ [iID] [int] NOT NULL PRIMARY KEY IDENTITY(1,1),
+ [LoginName] nvarchar(256) NOT NULL
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblProcedureNames', 'U') IS NOT NULL DROP TABLE ReadTrace.tblProcedureNames
+GO
+CREATE TABLE ReadTrace.tblProcedureNames
+(
+ DBID int NULL,
+ ObjectID int NULL,
+ SpecialProcID tinyint NULL,
+ [Name] nvarchar(388) NOT NULL
+)
+GO
+
+-- ===========================================
+-- Unique / Normalization Tables
+-- ===========================================
+
+IF OBJECT_ID('ReadTrace.tblUniqueBatches', 'U') IS NOT NULL DROP TABLE ReadTrace.tblUniqueBatches
+GO
+CREATE TABLE ReadTrace.tblUniqueBatches
+(
+ Seq bigint NOT NULL,
+ HashID bigint NOT NULL,
+ OrigText nvarchar(max) NOT NULL,
+ NormText nvarchar(max) NOT NULL,
+ SpecialProcID tinyint NULL
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblUniqueStatements', 'U') IS NOT NULL DROP TABLE ReadTrace.tblUniqueStatements
+GO
+CREATE TABLE ReadTrace.tblUniqueStatements
+(
+ Seq bigint NOT NULL,
+ HashID bigint NOT NULL,
+ OrigText nvarchar(max) NULL,
+ NormText nvarchar(max) NULL
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblUniquePlans', 'U') IS NOT NULL DROP TABLE ReadTrace.tblUniquePlans
+GO
+CREATE TABLE ReadTrace.tblUniquePlans
+(
+ Seq bigint NOT NULL,
+ PlanHashID bigint NOT NULL,
+ DBID int NULL,
+ NormPlanText nvarchar(max) NULL
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblUniquePlanRows', 'U') IS NOT NULL DROP TABLE ReadTrace.tblUniquePlanRows
+GO
+CREATE TABLE ReadTrace.tblUniquePlanRows
+(
+ PlanHashID bigint NOT NULL,
+ Rows bigint NULL,
+ Executes bigint NULL,
+ StmtText nvarchar(max) NOT NULL,
+ StmtID int NOT NULL,
+ NodeID smallint NOT NULL,
+ Parent smallint NULL,
+ PhysicalOp varchar(30) NULL,
+ LogicalOp varchar(30) NULL,
+ Argument nvarchar(256) NULL,
+ DefinedValues nvarchar(256) NULL,
+ EstimateRows float NULL,
+ EstimateIO float NULL,
+ EstimateCPU float NULL,
+ AvgRowSize int NULL,
+ TotalSubtreeCost float NULL,
+ OutputList nvarchar(256) NULL,
+ Warnings varchar(100) NULL,
+ Type varchar(30) NULL,
+ Parallel tinyint NULL,
+ EstimateExecutions float NULL,
+ RowOrder smallint NOT NULL
+)
+GO
+
+-- ===========================================
+-- Fact Tables
+-- ===========================================
+
+IF OBJECT_ID('ReadTrace.tblBatches', 'U') IS NOT NULL DROP TABLE ReadTrace.tblBatches
+GO
+CREATE TABLE ReadTrace.tblBatches
+(
+ BatchSeq bigint NOT NULL,
+ HashID bigint NOT NULL,
+ Session int NOT NULL,
+ Request int NOT NULL,
+ ConnId bigint NOT NULL,
+ StartTime datetime NULL,
+ EndTime datetime NULL,
+ Duration bigint NULL,
+ Reads bigint NULL,
+ Writes bigint NULL,
+ CPU bigint NULL,
+ fRPCEvent tinyint NOT NULL,
+ DBID int NULL,
+ StartSeq bigint NULL,
+ EndSeq bigint NULL,
+ AttnSeq bigint NULL,
+ ConnSeq bigint NULL,
+ TextData nvarchar(max) NULL,
+ OrigRowCount bigint NULL
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblStatements', 'U') IS NOT NULL DROP TABLE ReadTrace.tblStatements
+GO
+CREATE TABLE ReadTrace.tblStatements
+(
+ StmtSeq bigint NOT NULL,
+ HashID bigint NOT NULL,
+ Session int NOT NULL,
+ Request int NOT NULL,
+ ConnId bigint NOT NULL,
+ StartTime datetime NULL,
+ EndTime datetime NULL,
+ Duration bigint NULL,
+ Reads bigint NULL,
+ Writes bigint NULL,
+ CPU bigint NULL,
+ Rows bigint NULL,
+ DBID int NULL,
+ ObjectID int NULL,
+ NestLevel tinyint NULL,
+ fDynamicSQL bit NULL,
+ StartSeq bigint NULL,
+ EndSeq bigint NULL,
+ ConnSeq bigint NULL,
+ BatchSeq bigint NULL,
+ ParentStmtSeq bigint NULL,
+ AttnSeq bigint NULL,
+ TextData nvarchar(max) NULL
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblPlans', 'U') IS NOT NULL DROP TABLE ReadTrace.tblPlans
+GO
+CREATE TABLE ReadTrace.tblPlans
+(
+ Seq bigint NOT NULL,
+ PlanHashID bigint NOT NULL,
+ DBID int NULL,
+ BatchSeq bigint NULL,
+ StmtSeq bigint NULL,
+ Session int NOT NULL,
+ Request int NOT NULL,
+ ConnId bigint NOT NULL,
+ StartTime datetime NOT NULL,
+ DOP tinyint NULL
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblPlanRows', 'U') IS NOT NULL DROP TABLE ReadTrace.tblPlanRows
+GO
+CREATE TABLE ReadTrace.tblPlanRows
+(
+ Seq bigint NOT NULL,
+ Rows bigint NULL,
+ Executes bigint NULL,
+ EstimateRows float NULL,
+ EstimateExecutes float NULL,
+ RowOrder smallint NOT NULL
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblConnections', 'U') IS NOT NULL DROP TABLE ReadTrace.tblConnections
+GO
+CREATE TABLE ReadTrace.tblConnections
+(
+ ConnSeq bigint NOT NULL,
+ Session int NOT NULL,
+ StartTime datetime NULL,
+ EndTime datetime NULL,
+ Duration bigint NULL,
+ Reads bigint NULL,
+ Writes bigint NULL,
+ CPU bigint NULL,
+ ApplicationName nvarchar(256) NULL,
+ LoginName nvarchar(256) NULL,
+ HostName nvarchar(256) NULL,
+ NTDomainName nvarchar(256) NULL,
+ NTUserName nvarchar(256) NULL,
+ StartSeq bigint NULL,
+ EndSeq bigint NULL,
+ TextData nvarchar(max) NULL
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblInterestingEvents', 'U') IS NOT NULL DROP TABLE ReadTrace.tblInterestingEvents
+GO
+CREATE TABLE ReadTrace.tblInterestingEvents
+(
+ Seq bigint NOT NULL,
+ EventID int NOT NULL,
+ Session int NOT NULL,
+ Request int NOT NULL,
+ ConnId bigint NOT NULL,
+ StartTime datetime NULL,
+ EndTime datetime NULL,
+ Duration bigint NULL,
+ DBID int NULL,
+ IntegerData int NULL,
+ EventSubclass int NULL,
+ TextData varchar(1000) NULL,
+ ObjectID int NULL,
+ Error int NULL,
+ BatchSeq bigint NULL,
+ Severity int NULL,
+ State int NULL
+)
+GO
+
+-- ===========================================
+-- Time Intervals and Aggregation Tables
+-- ===========================================
+
+IF OBJECT_ID('ReadTrace.tblTimeIntervals', 'U') IS NOT NULL DROP TABLE ReadTrace.tblTimeIntervals
+GO
+CREATE TABLE ReadTrace.tblTimeIntervals
+(
+ TimeInterval int NOT NULL IDENTITY(1, 1) PRIMARY KEY NONCLUSTERED,
+ StartTime datetime NOT NULL,
+ EndTime datetime NOT NULL
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblBatchPartialAggs', 'U') IS NOT NULL DROP TABLE ReadTrace.tblBatchPartialAggs
+GO
+CREATE TABLE ReadTrace.tblBatchPartialAggs
+(
+ [HashID] [bigint] NOT NULL,
+ [TimeInterval] [int] NOT NULL,
+ [StartingEvents] [int] NOT NULL,
+ [CompletedEvents] [int] NOT NULL,
+ [AttentionEvents] [int] NOT NULL,
+ [MinDuration] [bigint] NULL,
+ [MaxDuration] [bigint] NULL,
+ [TotalDuration] [bigint] NULL,
+ [MinReads] [bigint] NULL,
+ [MaxReads] [bigint] NULL,
+ [TotalReads] [bigint] NULL,
+ [MinWrites] [bigint] NULL,
+ [MaxWrites] [bigint] NULL,
+ [TotalWrites] [bigint] NULL,
+ [MinCPU] [bigint] NULL,
+ [MaxCPU] [bigint] NULL,
+ [TotalCPU] [bigint] NULL,
+ [AppNameID] int NOT NULL,
+ [LoginNameID] int NOT NULL,
+ [DBID] int NOT NULL
+)
+GO
+
+IF OBJECT_ID('ReadTrace.tblStmtPartialAggs', 'U') IS NOT NULL DROP TABLE ReadTrace.tblStmtPartialAggs
+GO
+CREATE TABLE ReadTrace.tblStmtPartialAggs
+(
+ [HashID] [bigint] NOT NULL,
+ [TimeInterval] [int] NOT NULL,
+ [ObjectID] int NULL,
+ [DBID] int NULL,
+ [AppNameID] int NOT NULL,
+ [LoginNameID] int NOT NULL,
+ [StartingEvents] [int] NOT NULL,
+ [CompletedEvents] [int] NOT NULL,
+ [AttentionEvents] [int] NOT NULL,
+ [MinDuration] [bigint] NULL,
+ [MaxDuration] [bigint] NULL,
+ [TotalDuration] [bigint] NULL,
+ [MinReads] [bigint] NULL,
+ [MaxReads] [bigint] NULL,
+ [TotalReads] [bigint] NULL,
+ [MinWrites] [bigint] NULL,
+ [MaxWrites] [bigint] NULL,
+ [TotalWrites] [bigint] NULL,
+ [MinCPU] [bigint] NULL,
+ [MaxCPU] [bigint] NULL,
+ [TotalCPU] [bigint] NULL
+)
+GO
+
IF OBJECT_ID('ReadTrace.tblComparisonBatchPartialAggs', 'U') IS NOT NULL DROP TABLE ReadTrace.tblComparisonBatchPartialAggs
GO
-- Baseline-vs-comparison aggregates. The dotted, bracket-quoted column names
-- ([b.HashID] = baseline, [c.HashID] = comparison) are intentional —
-- presumably to match the column headers ReadTrace.exe emits for comparison
-- runs; confirm against downstream reports before renaming.
CREATE TABLE ReadTrace.tblComparisonBatchPartialAggs
(
    [b.HashID] [bigint] NULL,
    [c.HashID] [bigint] NULL,
    [b.StartingEvents] [bigint] NOT NULL,
    [b.CompletedEvents] [bigint] NOT NULL,
    [b.TotalCPU] [bigint] NOT NULL,
    [b.TotalDuration] [bigint] NOT NULL,
    [b.TotalReads] [bigint] NOT NULL,
    [b.TotalWrites] [bigint] NOT NULL,
    [c.StartingEvents] [bigint] NOT NULL,
    [c.CompletedEvents] [bigint] NOT NULL,
    [c.TotalCPU] [bigint] NOT NULL,
    [c.TotalDuration] [bigint] NOT NULL,
    [c.TotalReads] [bigint] NOT NULL,
    [c.TotalWrites] [bigint] NOT NULL,
    [ProjectedCPUDiff] numeric(38,4) NOT NULL,
    [ProjectedReadsDiff] numeric(38,4) NOT NULL,
    [ProjectedWritesDiff] numeric(38,4) NOT NULL,
    [ProjectedDurationDiff] numeric(38,4) NOT NULL
)
GO
+
+-- ===========================================
+-- Warnings Table
+-- ===========================================
+
+IF OBJECT_ID('ReadTrace.tblWarnings', 'U') IS NOT NULL DROP TABLE ReadTrace.tblWarnings
+GO
+CREATE TABLE ReadTrace.tblWarnings
+(
+ WarningID int NOT NULL IDENTITY(1, 1) PRIMARY KEY,
+ WarningMessage varchar(2000) NOT NULL,
+ NumberOfTimes int NULL,
+ FirstGlobalSeq bigint NULL,
+ fMayAffectCPU bit NOT NULL,
+ fMayAffectIO bit NOT NULL,
+ fMayAffectDuration bit NOT NULL,
+ fAffectsEventAssociation bit NOT NULL
+)
+GO
+
+-- ===========================================
+-- Views
+-- ===========================================
+
-- Rolls the per-batch partial aggregates up to one row per time interval
-- (interval boundaries come from tblTimeIntervals), producing the activity-
-- over-time shape that the ReadTrace.exe reports consume.
IF OBJECT_ID('ReadTrace.vwBatchPartialAggsByGroupTimeInterval', 'V') IS NOT NULL
    DROP VIEW ReadTrace.vwBatchPartialAggsByGroupTimeInterval
GO
CREATE VIEW ReadTrace.vwBatchPartialAggsByGroupTimeInterval
AS
SELECT
    t.StartTime,
    t.EndTime,
    a.TimeInterval,
    SUM(a.StartingEvents) AS StartingEvents,
    SUM(a.CompletedEvents) AS CompletedEvents,
    SUM(a.AttentionEvents) AS Attentions,
    SUM(a.TotalDuration) AS Duration,
    SUM(a.TotalReads) AS Reads,
    SUM(a.TotalWrites) AS Writes,
    SUM(a.TotalCPU) AS CPU
FROM ReadTrace.tblBatchPartialAggs a
INNER JOIN ReadTrace.tblTimeIntervals t ON a.TimeInterval = t.TimeInterval
GROUP BY a.TimeInterval, t.StartTime, t.EndTime
GO
diff --git a/TraceEventImporter/Schema/PostLoadFixups.sql b/TraceEventImporter/Schema/PostLoadFixups.sql
new file mode 100644
index 00000000..cfdca323
--- /dev/null
+++ b/TraceEventImporter/Schema/PostLoadFixups.sql
@@ -0,0 +1,92 @@
+-- PostLoadFixups.sql
+-- Run after all data is bulk-loaded to create indexes and reconcile relationships.
+
+-- ===========================================
+-- Primary / Clustered Indexes
+-- ===========================================
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE object_id = OBJECT_ID('ReadTrace.tblBatches') AND name = 'PK_tblBatches')
+ ALTER TABLE ReadTrace.tblBatches ADD CONSTRAINT PK_tblBatches PRIMARY KEY CLUSTERED (BatchSeq)
+GO
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE object_id = OBJECT_ID('ReadTrace.tblStatements') AND name = 'PK_tblStatements')
+ ALTER TABLE ReadTrace.tblStatements ADD CONSTRAINT PK_tblStatements PRIMARY KEY CLUSTERED (StmtSeq)
+GO
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE object_id = OBJECT_ID('ReadTrace.tblConnections') AND name = 'PK_tblConnections')
+ ALTER TABLE ReadTrace.tblConnections ADD CONSTRAINT PK_tblConnections PRIMARY KEY CLUSTERED (ConnSeq)
+GO
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE object_id = OBJECT_ID('ReadTrace.tblUniqueBatches') AND name = 'PK_tblUniqueBatches')
+ ALTER TABLE ReadTrace.tblUniqueBatches ADD CONSTRAINT PK_tblUniqueBatches PRIMARY KEY CLUSTERED (HashID)
+GO
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE object_id = OBJECT_ID('ReadTrace.tblUniqueStatements') AND name = 'PK_tblUniqueStatements')
+ ALTER TABLE ReadTrace.tblUniqueStatements ADD CONSTRAINT PK_tblUniqueStatements PRIMARY KEY CLUSTERED (HashID)
+GO
+
+-- ===========================================
+-- Nonclustered Indexes
+-- ===========================================
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE object_id = OBJECT_ID('ReadTrace.tblBatches') AND name = 'tblBatches_HashID')
+ CREATE NONCLUSTERED INDEX tblBatches_HashID ON ReadTrace.tblBatches (HashID)
+GO
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE object_id = OBJECT_ID('ReadTrace.tblBatches') AND name = 'tblBatches_Session')
+ CREATE NONCLUSTERED INDEX tblBatches_Session ON ReadTrace.tblBatches (Session, Request)
+GO
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE object_id = OBJECT_ID('ReadTrace.tblStatements') AND name = 'tblStatements_HashID')
+ CREATE NONCLUSTERED INDEX tblStatements_HashID ON ReadTrace.tblStatements (HashID)
+GO
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE object_id = OBJECT_ID('ReadTrace.tblStatements') AND name = 'tblStatements_BatchSeq')
+ CREATE NONCLUSTERED INDEX tblStatements_BatchSeq ON ReadTrace.tblStatements (BatchSeq)
+GO
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE object_id = OBJECT_ID('ReadTrace.tblInterestingEvents') AND name = 'tblInterestingEvents_BatchSeq')
+ CREATE NONCLUSTERED INDEX tblInterestingEvents_BatchSeq ON ReadTrace.tblInterestingEvents (BatchSeq)
+GO
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE object_id = OBJECT_ID('ReadTrace.tblBatchPartialAggs') AND name = 'tblBatchPartialAggs_HashID')
+ CREATE NONCLUSTERED INDEX tblBatchPartialAggs_HashID ON ReadTrace.tblBatchPartialAggs (HashID, TimeInterval)
+GO
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE object_id = OBJECT_ID('ReadTrace.tblStmtPartialAggs') AND name = 'tblStmtPartialAggs_HashID')
+ CREATE NONCLUSTERED INDEX tblStmtPartialAggs_HashID ON ReadTrace.tblStmtPartialAggs (HashID, TimeInterval)
+GO
+
+-- ===========================================
+-- Post-load Fixups: Link ConnSeq in tblBatches
+-- ===========================================
+UPDATE b
+SET b.ConnSeq = c.ConnSeq
+FROM ReadTrace.tblBatches b
+INNER JOIN ReadTrace.tblConnections c ON b.Session = c.Session
+WHERE b.ConnSeq IS NULL
+GO
+
+-- ===========================================
+-- Post-load Fixups: Link ParentStmtSeq for nested statements
+-- ===========================================
+;WITH StmtParents AS
+(
+ SELECT
+ s.StmtSeq,
+ s.BatchSeq,
+ s.NestLevel,
+ s.Session,
+ s.Request,
+ s.StartTime,
+ LAG(s.StmtSeq) OVER (PARTITION BY s.Session, s.Request ORDER BY s.StmtSeq) AS PrevStmtSeq,
+ LAG(s.NestLevel) OVER (PARTITION BY s.Session, s.Request ORDER BY s.StmtSeq) AS PrevNestLevel
+ FROM ReadTrace.tblStatements s
+ WHERE s.NestLevel > 1
+)
+UPDATE s
+SET s.ParentStmtSeq = sp.PrevStmtSeq
+FROM ReadTrace.tblStatements s
+INNER JOIN StmtParents sp ON s.StmtSeq = sp.StmtSeq
+WHERE sp.PrevNestLevel IS NOT NULL AND sp.PrevNestLevel < sp.NestLevel
+GO
diff --git a/TraceEventImporter/TraceEventImporter.csproj b/TraceEventImporter/TraceEventImporter.csproj
new file mode 100644
index 00000000..8cf7ea5a
--- /dev/null
+++ b/TraceEventImporter/TraceEventImporter.csproj
@@ -0,0 +1,30 @@
+
+
+ net48
+ TraceEventImporter
+ TraceEventImporter
+ true
+ true
+ ..\NexusInterfaces\SqlNexus.snk
+ 7.3
+ false
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/TraceEventImporter/TraceEventImporterPlugin.cs b/TraceEventImporter/TraceEventImporterPlugin.cs
new file mode 100644
index 00000000..78b55648
--- /dev/null
+++ b/TraceEventImporter/TraceEventImporterPlugin.cs
@@ -0,0 +1,386 @@
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Reflection;
+using System.Text.RegularExpressions;
+using System.Windows.Forms;
+using Microsoft.Data.SqlClient;
+using NexusInterfaces;
+using TraceEventImporter.Database;
+using TraceEventImporter.Models;
+using TraceEventImporter.Processing;
+using TraceEventImporter.Readers;
+
+namespace TraceEventImporter
+{
+ ///
+ /// SqlNexus importer plugin that reads .trc and .xel trace files, normalizes SQL text,
+ /// computes HashIDs, and bulk-loads data into the ReadTrace schema.
+ /// Coexists with the existing ReadTraceNexusImporter (which shells out to ReadTrace.exe).
+ /// Discovered automatically by SqlNexus via DLL reflection.
+ ///
+ public class TraceEventImporterPlugin : INexusImporter, INexusProgressReporter
+ {
+ // Options
+ private const string OPTION_ENABLED = "Enabled";
+ private const string OPTION_DROP_EXISTING = "Drop existing ReadTrace tables";
+ private const string OPTION_INTERVAL_SECONDS = "Aggregation interval (seconds)";
+
+ // Private state
+ private ILogger _logger;
+ private string _connStr;
+ private string _server;
+ private bool _useWindowsAuth;
+ private string _sqlLogin;
+ private string _sqlPassword;
+ private string _database;
+ private string _fileMask;
+ private ImportState _state = ImportState.Idle;
+ private bool _cancelled;
+ private long _totalRowsInserted;
+ private long _totalLinesProcessed;
+ private long _currentPosition;
+ private readonly ArrayList _knownRowsets = new ArrayList();
+ private readonly Dictionary _options = new Dictionary();
+
+ public TraceEventImporterPlugin()
+ {
+ _options.Add(OPTION_DROP_EXISTING, true);
+ _options.Add(OPTION_INTERVAL_SECONDS, 60);
+ _options.Add(OPTION_ENABLED, true);
+ }
+
+ #region INexusImporter
+
+ public Guid ID => new Guid("B7A3C2D1-E4F5-4A6B-8C9D-0E1F2A3B4C5D");
+
+ public string Name => "Trace Event Importer (Managed)";
+
+ public string[] SupportedMasks => new[] { "*.trc", "*pssdiag*.xel", "*LogScout*.xel" };
+
+ public Dictionary Options => _options;
+
+ public Form OptionsDialog => null;
+
+ public string[] PreScripts => new string[0];
+
+ public string[] PostScripts => new string[] { "ReadTracePostProcessing.sql" };
+
+ public ImportState State
+ {
+ get => _state;
+ private set
+ {
+ _state = value;
+ OnStatusChanged(EventArgs.Empty);
+ }
+ }
+
+ public bool Cancelled
+ {
+ get => _cancelled;
+ set => _cancelled = value;
+ }
+
+ public ArrayList KnownRowsets => _knownRowsets;
+
+ public long TotalRowsInserted => _totalRowsInserted;
+
+ public long TotalLinesProcessed => _totalLinesProcessed;
+
+ public void Initialize(string Filemask, string connString, string Server,
+ bool UseWindowsAuth, string SQLLogin, string SQLPassword, string DatabaseName, ILogger Logger)
+ {
+ _fileMask = Filemask;
+ _connStr = connString;
+ _server = Server;
+ _useWindowsAuth = UseWindowsAuth;
+ _sqlLogin = SQLLogin;
+ _sqlPassword = SQLPassword;
+ _database = DatabaseName;
+ _logger = Logger;
+
+ _state = ImportState.Idle;
+ _cancelled = false;
+ _totalRowsInserted = 0;
+ _totalLinesProcessed = 0;
+ }
+
+ public bool DoImport()
+ {
+ try
+ {
+ LogMessage("TraceEventImporter: Starting import...");
+ State = ImportState.Importing;
+
+ // 1. Find trace files
+ string dir = Path.GetDirectoryName(_fileMask);
+ string mask = Path.GetFileName(_fileMask);
+ if (string.IsNullOrEmpty(dir) || !Directory.Exists(dir))
+ {
+ LogMessage("TraceEventImporter: Directory not found: " + dir);
+ State = ImportState.NoFiles;
+ return false;
+ }
+
+ string[] allFiles = Directory.GetFiles(dir, mask);
+ string[] files = allFiles.Where(f => !SkipFile(f)).OrderBy(f => f).ToArray();
+
+ if (files.Length == 0)
+ {
+ LogMessage("TraceEventImporter: No eligible trace files found.");
+ State = ImportState.NoFiles;
+ return false;
+ }
+
+ int excluded = allFiles.Length - files.Length;
+ if (excluded > 0)
+ LogMessage($"TraceEventImporter: Excluded {excluded} file(s) (log_NNN.trc / *_blk.trc).");
+
+ LogMessage($"TraceEventImporter: Processing {files.Length} file(s)...");
+
+ // 2. Deploy schema (always ensure tables exist; option controls whether to drop first)
+ LogMessage("TraceEventImporter: Creating ReadTrace schema and tables...");
+ DeploySchema();
+
+ // 3. Process all files
+ var store = new UniqueStore();
+ var processor = new EventProcessor(store);
+ int intervalSeconds = Convert.ToInt32(_options[OPTION_INTERVAL_SECONDS]);
+ long globalSeq = 0;
+
+ using (var writer = new BulkWriter(_connStr))
+ {
+ foreach (string file in files)
+ {
+ if (_cancelled) break;
+
+ string ext = Path.GetExtension(file).ToLowerInvariant();
+ LogMessage($"TraceEventImporter: Reading {Path.GetFileName(file)}...");
+
+ ITraceEventReader reader;
+ if (ext == ".xel")
+ reader = new XelFileReader(globalSeq);
+ else
+ reader = new TrcFileReader(globalSeq);
+
+ long fileFirstSeq = long.MaxValue;
+ long fileLastSeq = 0;
+ DateTime? fileFirstTime = null;
+ DateTime? fileLastTime = null;
+ long fileEventsRead = 0;
+
+ foreach (TraceEvent evt in reader.ReadEvents(file))
+ {
+ if (_cancelled) break;
+
+ processor.ProcessEvent(evt);
+ _totalLinesProcessed++;
+ fileEventsRead++;
+
+ if (evt.Seq < fileFirstSeq) fileFirstSeq = evt.Seq;
+ if (evt.Seq > fileLastSeq) fileLastSeq = evt.Seq;
+ if (evt.Seq > globalSeq) globalSeq = evt.Seq;
+
+ if (evt.StartTime.HasValue)
+ {
+ if (!fileFirstTime.HasValue || evt.StartTime.Value < fileFirstTime.Value)
+ fileFirstTime = evt.StartTime;
+ if (!fileLastTime.HasValue || evt.StartTime.Value > fileLastTime.Value)
+ fileLastTime = evt.StartTime;
+ }
+
+ // Report progress periodically
+ if (fileEventsRead % 10000 == 0)
+ {
+ _currentPosition = fileEventsRead;
+ OnProgressChanged(EventArgs.Empty);
+ }
+ }
+
+ // Record trace file info
+ if (fileEventsRead > 0)
+ {
+ writer.WriteTraceFile(fileFirstSeq, fileLastSeq, fileFirstTime, fileLastTime, fileEventsRead, Path.GetFileName(file));
+ }
+
+ LogMessage($"TraceEventImporter: {Path.GetFileName(file)} - {fileEventsRead} events read.");
+ }
+
+ if (_cancelled)
+ {
+ LogMessage("TraceEventImporter: Import cancelled.");
+ State = ImportState.Idle;
+ return false;
+ }
+
+ // 4. Finalize processor (flush pending connections)
+ processor.Finalize();
+
+ // 5. Write all data
+ LogMessage("TraceEventImporter: Writing data to database...");
+
+ // Metadata
+ writer.WriteMiscInfo("SchemaVersion", "3.0");
+ writer.WriteMiscInfo("LoadDateTime", DateTime.UtcNow.ToString("yyyy-MM-dd HH:mm:ss"));
+ writer.WriteMiscInfo("ImporterVersion", "TraceEventImporter 1.0");
+
+ // Reference data
+ writer.WriteTracedEvents(store.GetTracedEventIds());
+ writer.WriteUniqueAppNames(store.GetUniqueAppNames());
+ writer.WriteUniqueLoginNames(store.GetUniqueLoginNames());
+ writer.WriteProcedureNames(store.GetProcedureNames());
+
+ // Unique text
+ writer.WriteUniqueBatches(store.GetUniqueBatches());
+ writer.WriteUniqueStatements(store.GetUniqueStatements());
+
+ // Fact data
+ writer.WriteConnections(processor.Connections);
+ writer.WriteBatches(processor.Batches);
+ writer.WriteStatements(processor.Statements);
+ writer.WriteInterestingEvents(processor.InterestingEvents);
+
+ // 6. Aggregation
+ LogMessage("TraceEventImporter: Computing aggregations...");
+ var aggregator = new Aggregator(intervalSeconds);
+ aggregator.Compute(processor.Batches, processor.Statements);
+
+ writer.WriteTimeIntervals(aggregator.TimeIntervals);
+ writer.WriteBatchPartialAggs(aggregator.BatchAggs);
+ writer.WriteStmtPartialAggs(aggregator.StmtAggs);
+
+ _totalRowsInserted = writer.TotalRowsInserted;
+ }
+
+ // 7. Post-load fixups (indexes, ConnSeq linking, ParentStmtSeq)
+ LogMessage("TraceEventImporter: Running post-load fixups...");
+ RunPostLoadFixups();
+
+ LogMessage($"TraceEventImporter: Import complete. {_totalRowsInserted} total rows inserted.");
+ State = ImportState.Idle;
+ return true;
+ }
+ catch (Exception ex)
+ {
+ LogMessage($"TraceEventImporter: Error - {ex.Message}");
+ LogMessage(ex.ToString());
+ State = ImportState.Idle;
+ return false;
+ }
+ }
+
+ public void Cancel()
+ {
+ _cancelled = true;
+ State = ImportState.Canceling;
+ LogMessage("TraceEventImporter: Cancel requested.");
+ }
+
+ public event EventHandler StatusChanged;
+
+ public void OnStatusChanged(EventArgs e)
+ {
+ StatusChanged?.Invoke(this, e);
+ }
+
+ #endregion
+
+ #region INexusProgressReporter
+
+ public long CurrentPosition => _currentPosition;
+
+ public event EventHandler ProgressChanged;
+
+ public void OnProgressChanged(EventArgs e)
+ {
+ ProgressChanged?.Invoke(this, e);
+ }
+
+ #endregion
+
+ #region Private Helpers
+
+ private void DeploySchema()
+ {
+ string sql = LoadEmbeddedResource("TraceEventImporter.Schema.CreateSchema.sql");
+ LogMessage($"TraceEventImporter: Schema script loaded ({sql.Length} chars).");
+ ExecuteSqlScript(sql);
+ LogMessage("TraceEventImporter: Schema deployment complete.");
+ }
+
+ private void RunPostLoadFixups()
+ {
+ string sql = LoadEmbeddedResource("TraceEventImporter.Schema.PostLoadFixups.sql");
+ ExecuteSqlScript(sql);
+ }
+
+ private void ExecuteSqlScript(string script)
+ {
+ // Split on GO statements (same approach as CSql in NexusInterfaces)
+ string[] batches = Regex.Split(script, @"^\s*GO\s*$", RegexOptions.Multiline | RegexOptions.IgnoreCase);
+
+ int executedCount = 0;
+ using (var conn = new SqlConnection(_connStr))
+ {
+ conn.Open();
+ LogMessage($"TraceEventImporter: Executing SQL script ({batches.Length} batches) on database '{conn.Database}'...");
+ foreach (string batch in batches)
+ {
+ string trimmed = batch.Trim();
+ if (trimmed.Length == 0) continue;
+
+ using (var cmd = new SqlCommand(trimmed, conn))
+ {
+ cmd.CommandTimeout = 0;
+ cmd.ExecuteNonQuery();
+ executedCount++;
+ }
+ }
+ LogMessage($"TraceEventImporter: Executed {executedCount} SQL batch(es) successfully.");
+ }
+ }
+
+ private static string LoadEmbeddedResource(string resourceName)
+ {
+ var assembly = Assembly.GetExecutingAssembly();
+ using (Stream stream = assembly.GetManifestResourceStream(resourceName))
+ {
+ if (stream == null)
+ throw new InvalidOperationException($"Embedded resource not found: {resourceName}");
+ using (var reader = new StreamReader(stream))
+ {
+ return reader.ReadToEnd();
+ }
+ }
+ }
+
+ private bool SkipFile(string fullFileName)
+ {
+ string name = Path.GetFileName(fullFileName);
+ if (string.IsNullOrEmpty(name)) return false;
+
+ // Skip blocked trace files
+ if (name.EndsWith("_blk.trc", StringComparison.OrdinalIgnoreCase))
+ return true;
+
+ // Skip internal log trace files
+ if (Regex.IsMatch(name, @"^log_\d+\.trc$", RegexOptions.IgnoreCase))
+ return true;
+
+ return false;
+ }
+
+ private void LogMessage(string msg)
+ {
+ if (_logger != null)
+ _logger.LogMessage(msg);
+ else
+ System.Diagnostics.Trace.WriteLine(msg);
+ }
+
+ #endregion
+ }
+}
diff --git a/sqlnexus.sln b/sqlnexus.sln
index 504c5120..d909ed5d 100644
--- a/sqlnexus.sln
+++ b/sqlnexus.sln
@@ -28,6 +28,8 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution
Setup-Related\SetupSQLNexusPrereq.ps1 = Setup-Related\SetupSQLNexusPrereq.ps1
EndProjectSection
EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TraceEventImporter", "TraceEventImporter\TraceEventImporter.csproj", "{08FCB9FF-C633-4B28-947B-39D553091600}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -395,6 +397,56 @@ Global
{CCF72846-1645-4548-8E66-A2FFCE83692D}.Release|x64.Build.0 = Release|Any CPU
{CCF72846-1645-4548-8E66-A2FFCE83692D}.Release|x86.ActiveCfg = Release|Any CPU
{CCF72846-1645-4548-8E66-A2FFCE83692D}.Release|x86.Build.0 = Release|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug|Win32.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug|Win32.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug|x64.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug|x86.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug64|Any CPU.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug64|Any CPU.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug64|Mixed Platforms.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug64|Mixed Platforms.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug64|Win32.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug64|Win32.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug64|x64.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug64|x64.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug64|x86.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Debug64|x86.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.DebugLocal|Any CPU.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.DebugLocal|Any CPU.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.DebugLocal|Mixed Platforms.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.DebugLocal|Mixed Platforms.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.DebugLocal|Win32.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.DebugLocal|Win32.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.DebugLocal|x64.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.DebugLocal|x64.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.DebugLocal|x86.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.DebugLocal|x86.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Production|Any CPU.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Production|Any CPU.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Production|Mixed Platforms.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Production|Mixed Platforms.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Production|Win32.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Production|Win32.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Production|x64.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Production|x64.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Production|x86.ActiveCfg = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Production|x86.Build.0 = Debug|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Release|Any CPU.Build.0 = Release|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Release|Mixed Platforms.Build.0 = Release|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Release|Win32.ActiveCfg = Release|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Release|Win32.Build.0 = Release|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Release|x64.ActiveCfg = Release|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Release|x64.Build.0 = Release|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Release|x86.ActiveCfg = Release|Any CPU
+ {08FCB9FF-C633-4B28-947B-39D553091600}.Release|x86.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
diff --git a/sqlnexus/AppConfig.xml b/sqlnexus/AppConfig.xml
index 27eb501d..cf8149ac 100644
--- a/sqlnexus/AppConfig.xml
+++ b/sqlnexus/AppConfig.xml
@@ -18,6 +18,8 @@
+
+
diff --git a/sqlnexus/Utilities.cs b/sqlnexus/Utilities.cs
index 497aa58a..9e68606c 100644
--- a/sqlnexus/Utilities.cs
+++ b/sqlnexus/Utilities.cs
@@ -35,6 +35,10 @@ public static bool IsEnabled(String option)
{
return m_options.ContainsKey(option) && m_options[option];
}
+ public static bool HasOption(String option)
+ {
+ return m_options.ContainsKey(option);
+ }
public static void Set(String option, bool Enable )
{
if (m_options.ContainsKey(option))
diff --git a/sqlnexus/fmImport.cs b/sqlnexus/fmImport.cs
index cb1f47ea..9d22291a 100644
--- a/sqlnexus/fmImport.cs
+++ b/sqlnexus/fmImport.cs
@@ -412,6 +412,43 @@ public void EnumImporters()
tsiImporters.DropDownItems.Clear();
EnumImportersFromDirectory(Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData) + @"\SqlNexus\Importers");
EnumImportersFromDirectory(Application.StartupPath);
+ EnforceTraceImporterExclusivity();
+ }
+
+ ///
+ /// If both ReadTrace and TraceEventImporter are enabled, disable ReadTrace
+ /// (TraceEventImporter is the preferred/modern one).
+ ///
+ private void EnforceTraceImporterExclusivity()
+ {
+ bool readTraceEnabled = IsImporterEnabled(READTRACE_IMPORTER_NAME);
+ bool traceEventEnabled = IsImporterEnabled(TRACEEVENT_IMPORTER_NAME);
+
+ if (readTraceEnabled && traceEventEnabled)
+ {
+ DisableImporterByName(READTRACE_IMPORTER_NAME);
+ MainForm.LogMessage(string.Format("Both '{0}' and '{1}' were enabled. Automatically disabled '{1}' to prevent conflicts.", TRACEEVENT_IMPORTER_NAME, READTRACE_IMPORTER_NAME), MessageOptions.Silent);
+ }
+ }
+
+ ///
+ /// Checks whether the specified importer's Enabled option is checked.
+ ///
+ private bool IsImporterEnabled(string importerName)
+ {
+ foreach (ToolStripMenuItem importerTsi in tsiImporters.DropDownItems)
+ {
+ INexusImporter prod = importerTsi.Tag as INexusImporter;
+ if (prod == null || prod.Name != importerName)
+ continue;
+
+ foreach (ToolStripMenuItem optionTsi in importerTsi.DropDownItems)
+ {
+ if (optionTsi.Text == "Enabled")
+ return optionTsi.Checked;
+ }
+ }
+ return false;
}
private String FileVersions(String file)
{
@@ -560,7 +597,8 @@ private void EnumImportersFromDirectory(string importerDirectory)
"Ignore events associated with PSSDIAG activity",
"Disable event requirement checks",
"Import to SQL (Linux Perf)",
- "Drop existing tables (Linux Perf)"
+ "Drop existing tables (Linux Perf)",
+ "Drop existing ReadTrace tables"
};
foreach (string optionName in optionNames)
@@ -570,10 +608,12 @@ private void EnumImportersFromDirectory(string importerDirectory)
{
// Construct the userSavedKey using the product name and option name
string userSavedKey = string.Format("{0}.{1}", prod.Name, optionName);
- // Get the userSavedValue using ImportOptions
- bool userSavedValue = ImportOptions.IsEnabled(userSavedKey);
- // Update the option with the userSavedValue
- prod.Options[optionName] = userSavedValue;
+ // Only restore saved value if it was actually saved before
+ if (ImportOptions.HasOption(userSavedKey))
+ {
+ bool userSavedValue = ImportOptions.IsEnabled(userSavedKey);
+ prod.Options[optionName] = userSavedValue;
+ }
}
}
@@ -590,23 +630,28 @@ private void EnumImportersFromDirectory(string importerDirectory)
subtsi.Tag = prod.OptionsDialog;
subtsi.Click += new System.EventHandler(this.tsiDialog_Click);
}
- else // boolean
+ else if (prod.Options[option] is bool) // boolean
{
m_OptionList.Add(subtsi);
subtsi.Tag = prod;
subtsi.CheckOnClick = true;
- bool UserSaved = ImportOptions.IsEnabled(String.Format("{0}.{1}", prod.Name, subtsi.Text));
+ string savedKey = String.Format("{0}.{1}", prod.Name, subtsi.Text);
MainForm.LogMessage("load: " + String.Format("{0}->{1}", prod.Name, option), MessageOptions.Silent);
- if (ImportOptions.IsEnabled("SaveImportOptions"))
- subtsi.Checked = UserSaved;
+ if (ImportOptions.IsEnabled("SaveImportOptions") && ImportOptions.HasOption(savedKey))
+ subtsi.Checked = ImportOptions.IsEnabled(savedKey);
else
subtsi.Checked = (bool)prod.Options[option];
subtsi.Click += new System.EventHandler(this.tsiBool_Click);
}
+ else
+ {
+ // Non-boolean, non-dialog options (e.g. int) are not shown in the menu
+ continue;
+ }
tsi.DropDownItems.Add(subtsi);
}
@@ -1387,8 +1432,9 @@ private void RunPostScripts(string importerName)
if (string.IsNullOrEmpty(importerName))
return;
- // Only execute ReadTracePostProcessing.sql when the current importer is the ReadTrace importer.
- bool isReadTraceImporter = importerName.Equals("ReadTrace (SQL XEL/TRC files)", StringComparison.OrdinalIgnoreCase);
+ // Only execute ReadTracePostProcessing.sql when the current importer is ReadTrace or TraceEventImporter (both use the ReadTrace schema).
+ bool isReadTraceImporter = importerName.Equals("ReadTrace (SQL XEL/TRC files)", StringComparison.OrdinalIgnoreCase)
+ || importerName.Equals(TRACEEVENT_IMPORTER_NAME, StringComparison.OrdinalIgnoreCase);
// If nothing to run, skip executing the post scripts.
if (!PostScripts.TryGetValue(importerName, out var scripts) || scripts == null || scripts.Length == 0)
@@ -1582,6 +1628,10 @@ private void tsiSaveOptions_Click(object sender, EventArgs e)
}
+ // Importer names used for mutual exclusivity
+ private const string READTRACE_IMPORTER_NAME = "ReadTrace (SQL XEL/TRC Files)";
+ private const string TRACEEVENT_IMPORTER_NAME = "Trace Event Importer (Managed)";
+
private void tsiBool_Click(object sender, EventArgs e)
{
ToolStripMenuItem tsi = (sender as ToolStripMenuItem);
@@ -1589,12 +1639,57 @@ private void tsiBool_Click(object sender, EventArgs e)
prod.Options[tsi.Text] = tsi.Checked;
if (ImportOptions.IsEnabled("SaveImportOptions"))
{
-
ImportOptions.Set(string.Format("{0}.{1}", prod.Name, tsi.Text), tsi.Checked);
- //LogMessage("strip: " + string.Format("{0}.{1}", prod.Name, tsi.Name), MessageOptions.Silent);
+ }
+
+ // Mutual exclusivity: enabling one trace importer disables the other
+ if (tsi.Text == "Enabled" && tsi.Checked)
+ {
+ string otherImporterName = null;
+ if (prod.Name == TRACEEVENT_IMPORTER_NAME)
+ otherImporterName = READTRACE_IMPORTER_NAME;
+ else if (prod.Name == READTRACE_IMPORTER_NAME)
+ otherImporterName = TRACEEVENT_IMPORTER_NAME;
+ if (otherImporterName != null)
+ {
+ DisableImporterByName(otherImporterName);
+ MessageBox.Show(
+ string.Format("You have enabled '{0}'.\nThe '{1}' importer has been automatically disabled to prevent both from writing to the same tables.", prod.Name, otherImporterName),
+ "Importer Conflict",
+ MessageBoxButtons.OK,
+ MessageBoxIcon.Information);
+ EnumFiles();
+ }
}
+ }
+
+ ///
+ /// Finds the importer menu item by name and unchecks its Enabled option.
+ ///
+ private void DisableImporterByName(string importerName)
+ {
+ foreach (ToolStripMenuItem importerTsi in tsiImporters.DropDownItems)
+ {
+ INexusImporter otherProd = importerTsi.Tag as INexusImporter;
+ if (otherProd == null || otherProd.Name != importerName)
+ continue;
+ foreach (ToolStripMenuItem optionTsi in importerTsi.DropDownItems)
+ {
+ if (optionTsi.Text == "Enabled")
+ {
+ optionTsi.Checked = false;
+ otherProd.Options["Enabled"] = false;
+ if (ImportOptions.IsEnabled("SaveImportOptions"))
+ {
+ ImportOptions.Set(string.Format("{0}.Enabled", importerName), false);
+ }
+ break;
+ }
+ }
+ break;
+ }
}
private void tsiDialog_Click(object sender, EventArgs e)
diff --git a/sqlnexus/sqlnexus.csproj b/sqlnexus/sqlnexus.csproj
index 687ce72f..c0c8c5a3 100644
--- a/sqlnexus/sqlnexus.csproj
+++ b/sqlnexus/sqlnexus.csproj
@@ -1,4 +1,4 @@
-
+
@@ -886,6 +886,10 @@
{8DB02D0C-68F3-4762-9BC9-AA3DF2B91F4B}
RowsetImportEngine
+
+ {B7A3C2D1-E4F5-4A6B-8C9D-0E1F2A3B4C5D}
+ TraceEventImporter
+
@@ -1673,4 +1677,4 @@
-
\ No newline at end of file
+
From b82f54add09f291086793592d84e51dd70187b0f Mon Sep 17 00:00:00 2001
From: JosephPilov-MSFT <23519517+PiJoCoder@users.noreply.github.com>
Date: Mon, 27 Apr 2026 18:38:45 -0500
Subject: [PATCH 02/12] #513 Fix TraceEventImporter CPU unit conversion,
BulkWriter reliability, import logging, and UI fixes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
- Fix XEL `cpu_time` unit conversion in `XelFileReader.cs`: divide by 1000 (microseconds → milliseconds) to match ReadTrace.exe convention
- Wrap all `BulkLoadRowset` usages in `BulkWriter.cs` with `try/finally` blocks to ensure `Close()` is called on exception
- Add per-table row count logging in `TraceEventImporterPlugin.cs` after import for diagnostics
- Close dropdown menus via `cmOptions.Close()` before showing the importer-conflict MessageBox in `fmImport.cs`, to prevent the dialog from being hidden behind open menus
- Pass `this` as owner to `MessageBox.Show()` for proper modal centering
- Remove `EnumFiles()` call from `tsiBool_Click` to prevent `NullReferenceException` when `instances` is not yet initialized
---
TraceEventImporter/Database/BulkWriter.cs | 92 ++++++++++++++++++-
TraceEventImporter/Readers/XelFileReader.cs | 5 +-
.../TraceEventImporterPlugin.cs | 35 +++++--
sqlnexus/fmImport.cs | 7 +-
4 files changed, 128 insertions(+), 11 deletions(-)
diff --git a/TraceEventImporter/Database/BulkWriter.cs b/TraceEventImporter/Database/BulkWriter.cs
index b8f9aac6..68a8dc02 100644
--- a/TraceEventImporter/Database/BulkWriter.cs
+++ b/TraceEventImporter/Database/BulkWriter.cs
@@ -25,17 +25,25 @@ public BulkWriter(string connectionString)
public void WriteMiscInfo(string attribute, string value)
{
var bl = new BulkLoadRowset("ReadTrace.tblMiscInfo", _connStr);
+ try
+ {
DataRow row = bl.GetNewRow();
row["Attribute"] = attribute;
row["Value"] = (object)value ?? DBNull.Value;
bl.InsertRow(row);
TotalRowsInserted++;
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteTraceFile(long firstSeq, long lastSeq, DateTime? firstTime, DateTime? lastTime, long eventsRead, string fileName)
{
var bl = new BulkLoadRowset("ReadTrace.tblTraceFiles", _connStr);
+ try
+ {
DataRow row = bl.GetNewRow();
row["FirstSeqNumber"] = firstSeq;
row["LastSeqNumber"] = lastSeq;
@@ -45,12 +53,18 @@ public void WriteTraceFile(long firstSeq, long lastSeq, DateTime? firstTime, Dat
row["TraceFileName"] = fileName;
bl.InsertRow(row);
TotalRowsInserted++;
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteTracedEvents(IEnumerable eventIds)
{
var bl = new BulkLoadRowset("ReadTrace.tblTracedEvents", _connStr);
+ try
+ {
foreach (int id in eventIds)
{
DataRow row = bl.GetNewRow();
@@ -58,12 +72,18 @@ public void WriteTracedEvents(IEnumerable eventIds)
bl.InsertRow(row);
TotalRowsInserted++;
}
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteUniqueAppNames(IEnumerable> appNames)
{
var bl = new BulkLoadRowset("ReadTrace.tblUniqueAppNames", _connStr);
+ try
+ {
foreach (var kvp in appNames)
{
DataRow row = bl.GetNewRow();
@@ -72,12 +92,18 @@ public void WriteUniqueAppNames(IEnumerable> appNames)
bl.InsertRow(row);
TotalRowsInserted++;
}
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteUniqueLoginNames(IEnumerable> loginNames)
{
var bl = new BulkLoadRowset("ReadTrace.tblUniqueLoginNames", _connStr);
+ try
+ {
foreach (var kvp in loginNames)
{
DataRow row = bl.GetNewRow();
@@ -85,12 +111,18 @@ public void WriteUniqueLoginNames(IEnumerable> loginNa
bl.InsertRow(row);
TotalRowsInserted++;
}
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteProcedureNames(IEnumerable procs)
{
var bl = new BulkLoadRowset("ReadTrace.tblProcedureNames", _connStr);
+ try
+ {
foreach (var proc in procs)
{
DataRow row = bl.GetNewRow();
@@ -101,12 +133,18 @@ public void WriteProcedureNames(IEnumerable procs)
bl.InsertRow(row);
TotalRowsInserted++;
}
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteUniqueBatches(IEnumerable batches)
{
var bl = new BulkLoadRowset("ReadTrace.tblUniqueBatches", _connStr);
+ try
+ {
foreach (var b in batches)
{
DataRow row = bl.GetNewRow();
@@ -118,12 +156,18 @@ public void WriteUniqueBatches(IEnumerable batches)
bl.InsertRow(row);
TotalRowsInserted++;
}
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteUniqueStatements(IEnumerable stmts)
{
var bl = new BulkLoadRowset("ReadTrace.tblUniqueStatements", _connStr);
+ try
+ {
foreach (var s in stmts)
{
DataRow row = bl.GetNewRow();
@@ -134,12 +178,18 @@ public void WriteUniqueStatements(IEnumerable stmts)
bl.InsertRow(row);
TotalRowsInserted++;
}
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteBatches(List<BatchRow> batches)
{
var bl = new BulkLoadRowset("ReadTrace.tblBatches", _connStr);
+ try
+ {
foreach (var b in batches)
{
DataRow row = bl.GetNewRow();
@@ -165,12 +215,18 @@ public void WriteBatches(List batches)
bl.InsertRow(row);
TotalRowsInserted++;
}
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteStatements(List stmts)
{
var bl = new BulkLoadRowset("ReadTrace.tblStatements", _connStr);
+ try
+ {
foreach (var s in stmts)
{
DataRow row = bl.GetNewRow();
@@ -200,12 +256,18 @@ public void WriteStatements(List stmts)
bl.InsertRow(row);
TotalRowsInserted++;
}
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteConnections(List<ConnectionRow> conns)
{
var bl = new BulkLoadRowset("ReadTrace.tblConnections", _connStr);
+ try
+ {
foreach (var c in conns)
{
DataRow row = bl.GetNewRow();
@@ -228,12 +290,18 @@ public void WriteConnections(List conns)
bl.InsertRow(row);
TotalRowsInserted++;
}
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteInterestingEvents(List<InterestingEventRow> events)
{
var bl = new BulkLoadRowset("ReadTrace.tblInterestingEvents", _connStr);
+ try
+ {
foreach (var e in events)
{
DataRow row = bl.GetNewRow();
@@ -257,14 +325,18 @@ public void WriteInterestingEvents(List events)
bl.InsertRow(row);
TotalRowsInserted++;
}
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteTimeIntervals(List intervals)
{
- // tblTimeIntervals has IDENTITY — use SqlBulkCopy with KEEP_IDENTITY off
- // We insert without the TimeInterval column; SQL generates it
var bl = new BulkLoadRowset("ReadTrace.tblTimeIntervals", _connStr);
+ try
+ {
foreach (var ti in intervals)
{
DataRow row = bl.GetNewRow();
@@ -273,12 +345,18 @@ public void WriteTimeIntervals(List intervals)
bl.InsertRow(row);
TotalRowsInserted++;
}
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteBatchPartialAggs(List aggs)
{
var bl = new BulkLoadRowset("ReadTrace.tblBatchPartialAggs", _connStr);
+ try
+ {
foreach (var a in aggs)
{
DataRow row = bl.GetNewRow();
@@ -305,12 +383,18 @@ public void WriteBatchPartialAggs(List aggs)
bl.InsertRow(row);
TotalRowsInserted++;
}
+ }
+ finally
+ {
bl.Close();
+ }
}
public void WriteStmtPartialAggs(List aggs)
{
var bl = new BulkLoadRowset("ReadTrace.tblStmtPartialAggs", _connStr);
+ try
+ {
foreach (var a in aggs)
{
DataRow row = bl.GetNewRow();
@@ -338,7 +422,11 @@ public void WriteStmtPartialAggs(List aggs)
bl.InsertRow(row);
TotalRowsInserted++;
}
+ }
+ finally
+ {
bl.Close();
+ }
}
public void Dispose()
diff --git a/TraceEventImporter/Readers/XelFileReader.cs b/TraceEventImporter/Readers/XelFileReader.cs
index dc543023..b0393af3 100644
--- a/TraceEventImporter/Readers/XelFileReader.cs
+++ b/TraceEventImporter/Readers/XelFileReader.cs
@@ -194,7 +194,10 @@ private void MapSpStmtStarting(IXEvent xe, TraceEvent evt)
private void MapPerformanceFields(IXEvent xe, TraceEvent evt)
{
- evt.CPU = GetFieldInt64Nullable(xe, "cpu_time");
+ // XE reports cpu_time in microseconds; ReadTrace schema stores CPU in milliseconds
+ long? cpuMicroseconds = GetFieldInt64Nullable(xe, "cpu_time");
+ evt.CPU = cpuMicroseconds.HasValue ? cpuMicroseconds.Value / 1000 : (long?)null;
+
evt.Duration = GetFieldInt64Nullable(xe, "duration");
evt.Writes = GetFieldInt64Nullable(xe, "writes");
evt.RowCount = GetFieldInt64Nullable(xe, "row_count");
diff --git a/TraceEventImporter/TraceEventImporterPlugin.cs b/TraceEventImporter/TraceEventImporterPlugin.cs
index 78b55648..b89b0363 100644
--- a/TraceEventImporter/TraceEventImporterPlugin.cs
+++ b/TraceEventImporter/TraceEventImporterPlugin.cs
@@ -228,14 +228,20 @@ public bool DoImport()
writer.WriteMiscInfo("ImporterVersion", "TraceEventImporter 1.0");
// Reference data
- writer.WriteTracedEvents(store.GetTracedEventIds());
- writer.WriteUniqueAppNames(store.GetUniqueAppNames());
- writer.WriteUniqueLoginNames(store.GetUniqueLoginNames());
- writer.WriteProcedureNames(store.GetProcedureNames());
+ var tracedEvents = store.GetTracedEventIds();
+ writer.WriteTracedEvents(tracedEvents);
+ var uniqueAppNames = store.GetUniqueAppNames();
+ writer.WriteUniqueAppNames(uniqueAppNames);
+ var uniqueLoginNames = store.GetUniqueLoginNames();
+ writer.WriteUniqueLoginNames(uniqueLoginNames);
+ var procedureNames = store.GetProcedureNames();
+ writer.WriteProcedureNames(procedureNames);
// Unique text
- writer.WriteUniqueBatches(store.GetUniqueBatches());
- writer.WriteUniqueStatements(store.GetUniqueStatements());
+ var uniqueBatches = store.GetUniqueBatches();
+ writer.WriteUniqueBatches(uniqueBatches);
+ var uniqueStatements = store.GetUniqueStatements();
+ writer.WriteUniqueStatements(uniqueStatements);
// Fact data
writer.WriteConnections(processor.Connections);
@@ -253,6 +259,23 @@ public bool DoImport()
writer.WriteStmtPartialAggs(aggregator.StmtAggs);
_totalRowsInserted = writer.TotalRowsInserted;
+
+ // Log per-table row counts
+ LogMessage("TraceEventImporter: --- Row counts per table ---");
+ LogMessage($"TraceEventImporter: tblTracedEvents: {tracedEvents.Count()}");
+ LogMessage($"TraceEventImporter: tblUniqueAppNames: {uniqueAppNames.Count()}");
+ LogMessage($"TraceEventImporter: tblUniqueLoginNames: {uniqueLoginNames.Count()}");
+ LogMessage($"TraceEventImporter: tblProcedureNames: {procedureNames.Count()}");
+ LogMessage($"TraceEventImporter: tblUniqueBatches: {uniqueBatches.Count()}");
+ LogMessage($"TraceEventImporter: tblUniqueStatements: {uniqueStatements.Count()}");
+ LogMessage($"TraceEventImporter: tblConnections: {processor.Connections.Count}");
+ LogMessage($"TraceEventImporter: tblBatches: {processor.Batches.Count}");
+ LogMessage($"TraceEventImporter: tblStatements: {processor.Statements.Count}");
+ LogMessage($"TraceEventImporter: tblInterestingEvents: {processor.InterestingEvents.Count}");
+ LogMessage($"TraceEventImporter: tblTimeIntervals: {aggregator.TimeIntervals.Count}");
+ LogMessage($"TraceEventImporter: tblBatchPartialAggs: {aggregator.BatchAggs.Count}");
+ LogMessage($"TraceEventImporter: tblStmtPartialAggs: {aggregator.StmtAggs.Count}");
+ LogMessage($"TraceEventImporter: Total rows inserted: {_totalRowsInserted}");
}
// 7. Post-load fixups (indexes, ConnSeq linking, ParentStmtSeq)
diff --git a/sqlnexus/fmImport.cs b/sqlnexus/fmImport.cs
index 9d22291a..f3daaab7 100644
--- a/sqlnexus/fmImport.cs
+++ b/sqlnexus/fmImport.cs
@@ -1654,12 +1654,15 @@ private void tsiBool_Click(object sender, EventArgs e)
if (otherImporterName != null)
{
DisableImporterByName(otherImporterName);
- MessageBox.Show(
+
+ // Close all dropdown menus so the MessageBox isn't hidden behind them
+ cmOptions.Close();
+
+ MessageBox.Show(this,
string.Format("You have enabled '{0}'.\nThe '{1}' importer has been automatically disabled to prevent both from writing to the same tables.", prod.Name, otherImporterName),
"Importer Conflict",
MessageBoxButtons.OK,
MessageBoxIcon.Information);
- EnumFiles();
}
}
}
From 6403503c9f6b2df1bab5d24620039d14e7005f75 Mon Sep 17 00:00:00 2001
From: JosephPilov-MSFT <23519517+PiJoCoder@users.noreply.github.com>
Date: Tue, 28 Apr 2026 17:37:47 -0500
Subject: [PATCH 03/12] #513: Fix redundant null-coalescing on non-nullable
value types and add volatile to _cancelled field
- BulkLoadRowset.cs: Wrap SqlDataAdapter in using statement for proper disposal
- BulkWriter.cs: Remove redundant (object)x ?? DBNull.Value checks for non-nullable
value types (int, byte, long) in WriteBatches, WriteStatements, WriteInterestingEvents,
WriteProcedureNames, WriteUniqueBatches
- TraceEventImporterPlugin.cs: Mark _cancelled field as volatile to ensure cross-thread
visibility and fix always-false condition in inner cancellation check
---
BulkLoadEx/BulkLoadRowset.cs | 10 ++--
TraceEventImporter/Database/BulkWriter.cs | 14 ++---
TraceEventImporter/Readers/XelFileReader.cs | 57 ++++++++++---------
.../TraceEventImporterPlugin.cs | 2 +-
4 files changed, 43 insertions(+), 40 deletions(-)
diff --git a/BulkLoadEx/BulkLoadRowset.cs b/BulkLoadEx/BulkLoadRowset.cs
index 9d72dae7..2bb204ba 100644
--- a/BulkLoadEx/BulkLoadRowset.cs
+++ b/BulkLoadEx/BulkLoadRowset.cs
@@ -91,10 +91,12 @@ public DataRow GetNewRow()
{
quotedName = "[" + targetTable + "]";
}
- SqlDataAdapter schemaAdapter = new SqlDataAdapter("select * from " + quotedName + " where 1=0", cn);
- dataTableBuffer = new DataTable();
- dataTableBuffer.TableName = targetTable;
- schemaAdapter.Fill(this.dataTableBuffer);
+ using (SqlDataAdapter schemaAdapter = new SqlDataAdapter("select * from " + quotedName + " where 1=0", cn))
+ {
+ dataTableBuffer = new DataTable();
+ dataTableBuffer.TableName = targetTable;
+ schemaAdapter.Fill(this.dataTableBuffer);
+ }
}
return this.dataTableBuffer.NewRow();
}
diff --git a/TraceEventImporter/Database/BulkWriter.cs b/TraceEventImporter/Database/BulkWriter.cs
index 68a8dc02..6dbdf0a2 100644
--- a/TraceEventImporter/Database/BulkWriter.cs
+++ b/TraceEventImporter/Database/BulkWriter.cs
@@ -126,9 +126,9 @@ public void WriteProcedureNames(IEnumerable procs)
foreach (var proc in procs)
{
DataRow row = bl.GetNewRow();
- row["DBID"] = (object)proc.DBID ?? DBNull.Value;
- row["ObjectID"] = (object)proc.ObjectID ?? DBNull.Value;
- row["SpecialProcID"] = (object)proc.SpecialProcID ?? DBNull.Value;
+ row["DBID"] = proc.DBID;
+ row["ObjectID"] = proc.ObjectID;
+ row["SpecialProcID"] = proc.SpecialProcID;
row["Name"] = proc.Name ?? "";
bl.InsertRow(row);
TotalRowsInserted++;
@@ -152,7 +152,7 @@ public void WriteUniqueBatches(IEnumerable batches)
row["HashID"] = b.HashID;
row["OrigText"] = b.OrigText ?? "";
row["NormText"] = b.NormText ?? "";
- row["SpecialProcID"] = (object)b.SpecialProcID ?? DBNull.Value;
+ row["SpecialProcID"] = b.SpecialProcID;
bl.InsertRow(row);
TotalRowsInserted++;
}
@@ -205,7 +205,7 @@ public void WriteBatches(List batches)
row["Writes"] = (object)b.Writes ?? DBNull.Value;
row["CPU"] = (object)b.CPU ?? DBNull.Value;
row["fRPCEvent"] = b.fRPCEvent;
- row["DBID"] = (object)b.DBID ?? DBNull.Value;
+ row["DBID"] = b.DBID;
row["StartSeq"] = (object)b.StartSeq ?? DBNull.Value;
row["EndSeq"] = (object)b.EndSeq ?? DBNull.Value;
row["AttnSeq"] = (object)b.AttnSeq ?? DBNull.Value;
@@ -242,7 +242,7 @@ public void WriteStatements(List stmts)
row["Writes"] = (object)s.Writes ?? DBNull.Value;
row["CPU"] = (object)s.CPU ?? DBNull.Value;
row["Rows"] = (object)s.Rows ?? DBNull.Value;
- row["DBID"] = (object)s.DBID ?? DBNull.Value;
+ row["DBID"] = s.DBID;
row["ObjectID"] = (object)s.ObjectID ?? DBNull.Value;
row["NestLevel"] = s.NestLevel.HasValue ? (object)(byte)s.NestLevel.Value : DBNull.Value;
row["fDynamicSQL"] = s.fDynamicSQL;
@@ -313,7 +313,7 @@ public void WriteInterestingEvents(List events)
row["StartTime"] = (object)e.StartTime ?? DBNull.Value;
row["EndTime"] = (object)e.EndTime ?? DBNull.Value;
row["Duration"] = (object)e.Duration ?? DBNull.Value;
- row["DBID"] = (object)e.DBID ?? DBNull.Value;
+ row["DBID"] = e.DBID;
row["IntegerData"] = (object)e.IntegerData ?? DBNull.Value;
row["EventSubclass"] = (object)e.EventSubclass ?? DBNull.Value;
row["TextData"] = (object)e.TextData ?? DBNull.Value;
diff --git a/TraceEventImporter/Readers/XelFileReader.cs b/TraceEventImporter/Readers/XelFileReader.cs
index b0393af3..5d1cdffd 100644
--- a/TraceEventImporter/Readers/XelFileReader.cs
+++ b/TraceEventImporter/Readers/XelFileReader.cs
@@ -27,41 +27,42 @@ public XelFileReader(long startingSeq = 0)
public IEnumerable<TraceEvent> ReadEvents(string filePath)
{
// XELite uses async callbacks; bridge to synchronous IEnumerable via BlockingCollection
- var collection = new BlockingCollection<TraceEvent>(boundedCapacity: 1000);
-
- Task readerTask = Task.Run(() =>
+ using (BlockingCollection<TraceEvent> collection = new BlockingCollection<TraceEvent>(boundedCapacity: 1000))
{
- try
+ Task readerTask = Task.Run(() =>
{
- var streamer = new XEFileEventStreamer(filePath);
- streamer.ReadEventStream(
- () => Task.CompletedTask,
- xevent =>
- {
- TraceEvent evt = MapEvent(xevent);
- if (evt != null)
+ try
+ {
+ var streamer = new XEFileEventStreamer(filePath);
+ streamer.ReadEventStream(
+ () => Task.CompletedTask,
+ xevent =>
{
- if (evt.Seq == 0)
- evt.Seq = Interlocked.Increment(ref _globalSeq);
- collection.Add(evt);
- }
- return Task.CompletedTask;
- },
- CancellationToken.None).Wait();
- }
- finally
+ TraceEvent evt = MapEvent(xevent);
+ if (evt != null)
+ {
+ if (evt.Seq == 0)
+ evt.Seq = Interlocked.Increment(ref _globalSeq);
+ collection.Add(evt);
+ }
+ return Task.CompletedTask;
+ },
+ CancellationToken.None).Wait();
+ }
+ finally
+ {
+ collection.CompleteAdding();
+ }
+ });
+
+ foreach (TraceEvent evt in collection.GetConsumingEnumerable())
{
- collection.CompleteAdding();
+ yield return evt;
}
- });
- foreach (TraceEvent evt in collection.GetConsumingEnumerable())
- {
- yield return evt;
+ // Propagate any exception from the reader task
+ readerTask.GetAwaiter().GetResult();
}
-
- // Propagate any exception from the reader task
- readerTask.GetAwaiter().GetResult();
}
private TraceEvent MapEvent(IXEvent xe)
diff --git a/TraceEventImporter/TraceEventImporterPlugin.cs b/TraceEventImporter/TraceEventImporterPlugin.cs
index b89b0363..5d7679ed 100644
--- a/TraceEventImporter/TraceEventImporterPlugin.cs
+++ b/TraceEventImporter/TraceEventImporterPlugin.cs
@@ -38,7 +38,7 @@ public class TraceEventImporterPlugin : INexusImporter, INexusProgressReporter
private string _database;
private string _fileMask;
private ImportState _state = ImportState.Idle;
- private bool _cancelled;
+ private volatile bool _cancelled;
private long _totalRowsInserted;
private long _totalLinesProcessed;
private long _currentPosition;
From b6edf056248b9a1096cf187ebc654d4209a4dc36 Mon Sep 17 00:00:00 2001
From: JosephPilov-MSFT <23519517+PiJoCoder@users.noreply.github.com>
Date: Wed, 29 Apr 2026 12:18:42 -0500
Subject: [PATCH 04/12] #513: Align EventProcessor with ReadTrace.exe and fix
CodeQL warnings
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
EventProcessor - align sequence and connection handling with ReadTrace.exe:
- BatchSeq/StmtSeq now use Starting event's global sequence (no separate counters)
- Statement→Batch linking uses in-flight _curBatchStartSeq session state
- ConnSeq uses global sequence namespace for both real and fake connections
- Add "CONNECTED BEFORE TRACE" placeholder rows for sessions with no login event
- Only create fake connection rows from batch completion (not statements)
- Retain _sessionConnSeq on logout to prevent duplicate fake connection rows
---
.../Processing/EventProcessor.cs | 148 ++++++++++++++----
1 file changed, 121 insertions(+), 27 deletions(-)
diff --git a/TraceEventImporter/Processing/EventProcessor.cs b/TraceEventImporter/Processing/EventProcessor.cs
index 9f0bc781..f7da04c4 100644
--- a/TraceEventImporter/Processing/EventProcessor.cs
+++ b/TraceEventImporter/Processing/EventProcessor.cs
@@ -26,10 +26,20 @@ public class EventProcessor
private readonly Dictionary _connections =
new Dictionary();
- // Sequence counters
- private long _batchSeq;
- private long _stmtSeq;
- private long _connSeq;
+ // Per session+request: the StartSeq of the current in-flight batch.
+ // This is how ReadTrace links statements to their parent batch.
+ private readonly Dictionary<SessionRequestKey, long> _curBatchStartSeq =
+ new Dictionary<SessionRequestKey, long>();
+
+ // Sessions that have had a connection event (login/existing connection).
+ // Used to detect sessions that were "connected before trace" started.
+ // Value is the ConnSeq assigned to that session's connection row.
+ private readonly Dictionary _sessionConnSeq =
+ new Dictionary();
+
+ // All sequence values (ConnSeq, BatchSeq, StmtSeq) are derived from the
+ // global event sequence (evt.Seq), aligned with ReadTrace.exe which uses
+ // pEvent->GetGlobalSeq() for all of them. No separate counters needed.
// Collected rows for bulk insert
public List<BatchRow> Batches { get; } = new List<BatchRow>();
@@ -124,6 +134,11 @@ public void Finalize()
private void HandleBatchStarting(TraceEvent evt)
{
var key = new SessionRequestKey(evt.SessionId, evt.RequestId);
+
+ // Track the starting sequence for this session+request so statements
+ // processed during batch execution can reference it (ReadTrace pattern).
+ _curBatchStartSeq[key] = evt.Seq;
+
_pendingBatches[key] = new PendingBatch
{
StartSeq = evt.Seq,
@@ -140,13 +155,16 @@ private void HandleBatchStarting(TraceEvent evt)
private void HandleBatchCompleted(TraceEvent evt)
{
var key = new SessionRequestKey(evt.SessionId, evt.RequestId);
- long batchSeq = ++_batchSeq;
PendingBatch pending = null;
_pendingBatches.TryGetValue(key, out pending);
if (pending != null)
_pendingBatches.Remove(key);
+ // BatchSeq = StartSeq if available, otherwise EndSeq (matches ReadTrace.exe:
+ // Row.BatchSeq_Value = (StartSeq_Status == OK) ? StartSeq : EndSeq)
+ long batchSeq = pending?.StartSeq ?? evt.Seq;
+
// Use completed event's text, fall back to starting event's text
string textData = evt.TextData ?? pending?.TextData;
string objectName = evt.ObjectName ?? pending?.ObjectName;
@@ -173,6 +191,10 @@ private void HandleBatchCompleted(TraceEvent evt)
int appNameId = _store.GetOrAddAppName(evt.ApplicationName ?? pending?.ApplicationName);
int loginNameId = _store.GetOrAddLoginName(evt.LoginName ?? pending?.LoginName);
+ // Determine ConnSeq for this batch. If the session has no connection event,
+ // create a "connected before trace" placeholder (matches ReadTrace.exe behavior).
+ long connSeq = EnsureConnectionForSession(evt);
+
var row = new BatchRow
{
BatchSeq = batchSeq,
@@ -191,7 +213,7 @@ private void HandleBatchCompleted(TraceEvent evt)
StartSeq = pending?.StartSeq,
EndSeq = evt.Seq,
AttnSeq = null,
- ConnSeq = null,
+ ConnSeq = connSeq,
TextData = textData,
OrigRowCount = evt.RowCount,
AppNameID = appNameId,
@@ -199,6 +221,10 @@ private void HandleBatchCompleted(TraceEvent evt)
};
Batches.Add(row);
+
+ // Clear the in-flight batch sequence now that the batch has completed
+ // (matches ReadTrace: pStateInfo->CurBatchStartSeq = 0)
+ _curBatchStartSeq.Remove(key);
}
#endregion
@@ -224,7 +250,6 @@ private void HandleStatementStarting(TraceEvent evt)
private void HandleStatementCompleted(TraceEvent evt)
{
var key = new SessionRequestKey(evt.SessionId, evt.RequestId);
- long stmtSeq = ++_stmtSeq;
PendingStatement pending = null;
if (_pendingStatements.TryGetValue(key, out var stack) && stack.Count > 0)
@@ -236,19 +261,29 @@ private void HandleStatementCompleted(TraceEvent evt)
_store.TryAddStatement(hashId, textData, normText);
- // Find the parent batch
+ // StmtSeq = StartSeq if available, otherwise EndSeq (matches ReadTrace.exe:
+ // Row.StmtSeq_Value = (StartSeq_Status == OK) ? StartSeq : EndSeq)
+ long stmtSeq = pending?.StartSeq ?? evt.Seq;
+
+ // Find the parent batch using the in-flight batch's starting sequence.
+ // This is the key difference from the old approach: ReadTrace maintains
+ // CurBatchStartSeq as live session state set when BatchStarting arrives,
+ // so statements that complete *during* batch execution get linked.
long? batchSeq = null;
- if (Batches.Count > 0)
+ _curBatchStartSeq.TryGetValue(key, out long curBatchStart);
+ if (curBatchStart > 0)
{
- // Find the most recent batch for this session+request
- for (int i = Batches.Count - 1; i >= 0; i--)
- {
- if (Batches[i].Session == evt.SessionId && Batches[i].Request == evt.RequestId)
- {
- batchSeq = Batches[i].BatchSeq;
- break;
- }
- }
+ batchSeq = curBatchStart;
+ }
+
+ // Get ConnSeq for this statement's session. Unlike batches, statements
+ // do NOT create fake "CONNECTED BEFORE TRACE" rows — they just reference
+ // the existing ConnSeq if available (matches ReadTrace.exe: InsertStmt
+ // uses CurConnectSeq directly, only InsertBatch calls InsertConnectEvent).
+ long? connSeq = null;
+ if (_sessionConnSeq.TryGetValue(evt.SessionId, out long existingConnSeq))
+ {
+ connSeq = existingConnSeq;
}
int appNameId = _store.GetOrAddAppName(evt.ApplicationName);
@@ -274,7 +309,7 @@ private void HandleStatementCompleted(TraceEvent evt)
fDynamicSQL = false,
StartSeq = pending?.StartSeq,
EndSeq = evt.Seq,
- ConnSeq = null,
+ ConnSeq = connSeq,
BatchSeq = batchSeq,
ParentStmtSeq = null, // Filled in post-load fixups
AttnSeq = null,
@@ -290,9 +325,58 @@ private void HandleStatementCompleted(TraceEvent evt)
#region Connection Handling
+ /// <summary>
+ /// Ensures a connection row exists for the given event's session.
+ /// If no login/existing-connection event was seen for this session,
+ /// creates a "CONNECTED BEFORE TRACE" placeholder row using the event's
+ /// global sequence as ConnSeq (matches ReadTrace.exe: CurConnectSeq = BatchSeq_Value).
+ /// All ConnSeq values come from the global sequence space, ensuring uniqueness.
+ /// Returns the ConnSeq for the session.
+ /// </summary>
+ private long EnsureConnectionForSession(TraceEvent evt)
+ {
+ if (_sessionConnSeq.TryGetValue(evt.SessionId, out long existingConnSeq))
+ {
+ return existingConnSeq;
+ }
+
+ // No login event seen for this session — create a placeholder.
+ // ReadTrace.exe uses the batch's BatchSeq (which equals its StartSeq global
+ // sequence) as ConnSeq for fake connection rows. Since we call this from
+ // HandleBatchCompleted/HandleStatementCompleted, evt.Seq is the triggering
+ // event's unique global sequence — guaranteed unique per event.
+ long fakeConnSeq = evt.Seq;
+
+ Connections.Add(new ConnectionRow
+ {
+ ConnSeq = fakeConnSeq,
+ Session = evt.SessionId,
+ StartTime = null,
+ EndTime = null,
+ Duration = null,
+ Reads = null,
+ Writes = null,
+ CPU = null,
+ ApplicationName = "CONNECTED BEFORE TRACE",
+ LoginName = "CONNECTED BEFORE TRACE",
+ HostName = null,
+ NTDomainName = null,
+ NTUserName = null,
+ StartSeq = null,
+ EndSeq = null,
+ TextData = null
+ });
+
+ _sessionConnSeq[evt.SessionId] = fakeConnSeq;
+ return fakeConnSeq;
+ }
+
private void HandleLogin(TraceEvent evt)
{
- long connSeq = ++_connSeq;
+ // ConnSeq = the login event's global sequence (matches ReadTrace.exe:
+ // pStateInfo->CurConnectSeq = pEvent->GetGlobalSeq())
+ long connSeq = evt.Seq;
+
_connections[evt.SessionId] = new ConnectionInfo
{
ConnSeq = connSeq,
@@ -306,6 +390,9 @@ private void HandleLogin(TraceEvent evt)
NTUserName = evt.NTUserName,
TextData = evt.TextData
};
+
+ // Track that this session now has a real connection event
+ _sessionConnSeq[evt.SessionId] = connSeq;
}
private void HandleLogout(TraceEvent evt)
@@ -332,6 +419,15 @@ private void HandleLogout(TraceEvent evt)
TextData = conn.TextData
});
_connections.Remove(evt.SessionId);
+
+ // Do NOT clear _sessionConnSeq here. ReadTrace.exe clears
+ // CurConnectSeq = 0 on logout, but then creates only ONE fake
+ // connection per session gap. Our EnsureConnectionForSession
+ // already caches per-session, so keeping the last known ConnSeq
+ // prevents creating duplicate fake connection rows for every
+ // batch that arrives between logout and the next login.
+ // The next HandleLogin will overwrite _sessionConnSeq with the
+ // new login's global sequence anyway.
}
}
@@ -354,15 +450,13 @@ private void HandleAttention(TraceEvent evt)
private void HandleInterestingEvent(TraceEvent evt)
{
- // Find parent batch
+ // Find parent batch using in-flight batch sequence (matches ReadTrace)
+ var key = new SessionRequestKey(evt.SessionId, evt.RequestId);
long? batchSeq = null;
- for (int i = Batches.Count - 1; i >= 0; i--)
+ _curBatchStartSeq.TryGetValue(key, out long curBatchStart);
+ if (curBatchStart > 0)
{
- if (Batches[i].Session == evt.SessionId)
- {
- batchSeq = Batches[i].BatchSeq;
- break;
- }
+ batchSeq = curBatchStart;
}
InterestingEvents.Add(new InterestingEventRow
From d59e011aff708b69247982d8ea127eee9565505a Mon Sep 17 00:00:00 2001
From: JosephPilov-MSFT <23519517+PiJoCoder@users.noreply.github.com>
Date: Wed, 29 Apr 2026 12:37:45 -0500
Subject: [PATCH 05/12] #513: Extract inner SQL from sp_executesql and preserve
named parameters
- Add SpExecuteSqlExtractor to extract inner SQL query from sp_executesql,
sp_prepexec, sp_prepare, and sp_cursor* RPC calls for normalization/hashing
(aligns with ReadTrace.exe GetRPCParamText behavior)
- SqlTextNormalizer: only normalize auto-generated RPC params (@P0, @P1, ...)
to @P#; preserve named parameters (@JOB_ID, @ALL, @IS_SYSTEM) as-is
(aligns with ReadTrace.exe normalization)
---
.../Normalization/SpExecuteSqlExtractor.cs | 116 ++++++++++++++++++
.../Normalization/SqlTextNormalizer.cs | 37 +++++-
.../Processing/EventProcessor.cs | 18 ++-
3 files changed, 167 insertions(+), 4 deletions(-)
create mode 100644 TraceEventImporter/Normalization/SpExecuteSqlExtractor.cs
diff --git a/TraceEventImporter/Normalization/SpExecuteSqlExtractor.cs b/TraceEventImporter/Normalization/SpExecuteSqlExtractor.cs
new file mode 100644
index 00000000..18200d1e
--- /dev/null
+++ b/TraceEventImporter/Normalization/SpExecuteSqlExtractor.cs
@@ -0,0 +1,116 @@
+using System;
+
+namespace TraceEventImporter.Normalization
+{
+ /// <summary>
+ /// Extracts the inner SQL query text from sp_executesql, sp_prepexec, and sp_prepare
+ /// RPC calls. ReadTrace.exe does this via GetRPCParamText() to normalize/hash the
+ /// actual query rather than the wrapper call.
+ /// </summary>
+ public static class SpExecuteSqlExtractor
+ {
+ // SpecialProcIDs that carry an inner SQL query as the first parameter
+ private const byte SP_PREPARE_ID = 1;
+ private const byte SP_EXECUTESQL_ID = 3;
+ private const byte SP_PREPEXEC_ID = 4;
+ private const byte SP_CURSOROPEN_ID = 5;
+ private const byte SP_CURSORPREPARE_ID = 9;
+ private const byte SP_CURSORPREPEXEC_ID = 10;
+
+ /// <summary>
+ /// Returns true if this SpecialProcID should have its inner SQL extracted.
+ /// </summary>
+ public static bool ShouldExtractInnerSql(byte specialProcId)
+ {
+ return specialProcId == SP_EXECUTESQL_ID
+ || specialProcId == SP_PREPEXEC_ID
+ || specialProcId == SP_PREPARE_ID
+ || specialProcId == SP_CURSOROPEN_ID
+ || specialProcId == SP_CURSORPREPARE_ID
+ || specialProcId == SP_CURSORPREPEXEC_ID;
+ }
+
+ /// <summary>
+ /// Extracts the inner SQL query from a sp_executesql/sp_prepexec/sp_prepare
+ /// TextData string. Returns the inner SQL if found, otherwise returns null.
+ ///
+ /// Expected formats:
+ /// exec sp_executesql N'SELECT ...', N'@p1 int', @p1=42
+ /// exec sp_executesql N'SELECT ...'
+ /// SELECT ... (already extracted by trace infrastructure)
+ /// </summary>
+ public static string TryExtractInnerSql(string textData)
+ {
+ if (string.IsNullOrEmpty(textData))
+ return null;
+
+ // Find the first N' or ' which starts the SQL parameter
+ int i = 0;
+ int len = textData.Length;
+
+ // Skip past the proc name to find the first string parameter
+ // Look for N'...' or '...' pattern
+ while (i < len)
+ {
+ // N'...' unicode string literal
+ if (i < len - 1 && (textData[i] == 'N' || textData[i] == 'n') && textData[i + 1] == '\'')
+ {
+ // Check if this is after the proc name (not part of it)
+ // by verifying we've passed at least one space/comma
+ if (i > 0 && IsAfterProcName(textData, i))
+ {
+ return ExtractQuotedString(textData, i + 2);
+ }
+ }
+
+ // '...' regular string literal
+ if (textData[i] == '\'' && i > 0 && IsAfterProcName(textData, i))
+ {
+ return ExtractQuotedString(textData, i + 1);
+ }
+
+ i++;
+ }
+
+ return null;
+ }
+
+ private static bool IsAfterProcName(string text, int pos)
+ {
+ // Walk backwards to check we're past whitespace/comma after the proc name
+ int j = pos - 1;
+ while (j >= 0 && (text[j] == ' ' || text[j] == '\t' || text[j] == ','))
+ j--;
+ // We should be past at least a few chars (the proc name)
+ return j > 3;
+ }
+
+ private static string ExtractQuotedString(string text, int startAfterQuote)
+ {
+ var result = new System.Text.StringBuilder();
+ int i = startAfterQuote;
+ int len = text.Length;
+
+ while (i < len)
+ {
+ if (text[i] == '\'')
+ {
+ // Check for escaped quote ''
+ if (i + 1 < len && text[i + 1] == '\'')
+ {
+ result.Append('\'');
+ i += 2;
+ continue;
+ }
+ // End of string
+ break;
+ }
+ result.Append(text[i]);
+ i++;
+ }
+
+ string inner = result.ToString().Trim();
+ return inner.Length > 0 ? inner : null;
+ }
+ }
+}
\ No newline at end of file
diff --git a/TraceEventImporter/Normalization/SqlTextNormalizer.cs b/TraceEventImporter/Normalization/SqlTextNormalizer.cs
index 19e4f0dd..15f70415 100644
--- a/TraceEventImporter/Normalization/SqlTextNormalizer.cs
+++ b/TraceEventImporter/Normalization/SqlTextNormalizer.cs
@@ -125,10 +125,25 @@ public static string Normalize(string sql)
}
else
{
- // @user_variable -> @P#
+ // @user_variable — capture the name to decide whether to normalize
+ int nameStart = i;
while (i < len && IsIdentChar(sql[i]))
i++;
- AppendWithSpace(result, "@P#");
+
+ // ReadTrace only normalizes auto-generated RPC parameter names
+ // (@P0, @P1, @P2, ...) to @P#. Named parameters like @JOB_ID,
+ // @ALL, @IS_SYSTEM are preserved as-is (uppercased).
+ string varName = sql.Substring(nameStart, i - nameStart);
+ if (IsAutoGeneratedParam(varName))
+ {
+ AppendWithSpace(result, "@P#");
+ }
+ else
+ {
+ result.Append('@');
+ for (int k = 0; k < varName.Length; k++)
+ result.Append(char.ToUpperInvariant(varName[k]));
+ }
}
continue;
}
@@ -297,5 +312,23 @@ private static void AppendWithSpace(StringBuilder sb, string text)
{
sb.Append(text);
}
+
+ /// <summary>
+ /// Returns true if the variable name is an auto-generated RPC parameter
+ /// (e.g., P0, P1, P2, ...). These are the only @variables that get
+ /// normalized to @P# (matches ReadTrace.exe behavior).
+ /// </summary>
+ private static bool IsAutoGeneratedParam(string name)
+ {
+ // Must be "P" followed by one or more digits (case-insensitive)
+ if (name.Length < 2) return false;
+ if (name[0] != 'P' && name[0] != 'p') return false;
+ for (int i = 1; i < name.Length; i++)
+ {
+ if (!char.IsDigit(name[i]))
+ return false;
+ }
+ return true;
+ }
}
}
diff --git a/TraceEventImporter/Processing/EventProcessor.cs b/TraceEventImporter/Processing/EventProcessor.cs
index f7da04c4..6983b874 100644
--- a/TraceEventImporter/Processing/EventProcessor.cs
+++ b/TraceEventImporter/Processing/EventProcessor.cs
@@ -172,10 +172,24 @@ private void HandleBatchCompleted(TraceEvent evt)
// Determine special proc and normalize
byte specialProcId = isRpc ? SpecialProcDetector.GetSpecialProcId(objectName) : (byte)0;
- string normText = SqlTextNormalizer.Normalize(textData);
+
+ // For sp_executesql, sp_prepexec, sp_prepare, etc.: extract the inner SQL
+ // query text and use that for normalization/hashing (matches ReadTrace.exe
+ // which calls GetRPCParamText to get the actual query from the first parameter).
+ string textForNormalization = textData;
+ if (SpExecuteSqlExtractor.ShouldExtractInnerSql(specialProcId))
+ {
+ string innerSql = SpExecuteSqlExtractor.TryExtractInnerSql(textData);
+ if (innerSql != null)
+ {
+ textForNormalization = innerSql;
+ }
+ }
+
+ string normText = SqlTextNormalizer.Normalize(textForNormalization);
long hashId = HashComputer.ComputeHash(normText, specialProcId);
- // Add to unique store
+ // Add to unique store — store original textData but normalized inner SQL
_store.TryAddBatch(hashId, textData, normText, specialProcId);
// Track procedure name
From 45facae1aa60d8d4615f54a3d4f087a1af9de2d9 Mon Sep 17 00:00:00 2001
From: JosephPilov-MSFT <23519517+PiJoCoder@users.noreply.github.com>
Date: Wed, 29 Apr 2026 20:21:52 -0500
Subject: [PATCH 06/12] #513 remove the .TRC importer as it could not import
 files due to binary file-format header parsing issues. Not a common use case
---
TraceEventImporter/Readers/TrcFileReader.cs | 470 ------------------
.../TraceEventImporterPlugin.cs | 51 +-
2 files changed, 10 insertions(+), 511 deletions(-)
delete mode 100644 TraceEventImporter/Readers/TrcFileReader.cs
diff --git a/TraceEventImporter/Readers/TrcFileReader.cs b/TraceEventImporter/Readers/TrcFileReader.cs
deleted file mode 100644
index 5a264438..00000000
--- a/TraceEventImporter/Readers/TrcFileReader.cs
+++ /dev/null
@@ -1,470 +0,0 @@
-using System;
-using System.Collections.Generic;
-using System.IO;
-using System.Text;
-using TraceEventImporter.Models;
-
-namespace TraceEventImporter.Readers
-{
- ///
- /// Reads SQL Server Profiler trace (.trc) files in binary format.
- /// Implements a simplified binary parser based on the Yukon+ .trc file format
- /// documented in READ80TRACE/trccommon.h and 80TraceIO.cpp.
- /// No SMO or SQL Server dependency required.
- ///
- public class TrcFileReader : ITraceEventReader
- {
- // TRACE_SPECIAL_COLUMNS
- private const ushort TRACE_BEGIN_RECORD = 0xFFF6;
- private const ushort TRACE_FLUSH = 0xFFF2;
- private const ushort SQL_TRACE_VERSION = 0xFFFE;
- private const ushort TRACED_OPTIONS = 0xFFFD;
- private const ushort TRACED_EVENTS = 0xFFFC;
- private const ushort TRACED_FILTERS = 0xFFFB;
- private const ushort TRACE_START = 0xFFFA;
- private const ushort TRACE_STOP = 0xFFF9;
- private const ushort TRACE_TEXT_FILTERED = 0xFFF5;
-
- // Column IDs (from trccommon.h TRACE_DATA_COLUMNS)
- private const int COL_TEXT = 1;
- private const int COL_BINARYDATA = 2;
- private const int COL_DBID = 3;
- private const int COL_LINENO = 5;
- private const int COL_NTUSERNAME = 6;
- private const int COL_NTDOMAINNAME = 7;
- private const int COL_HOSTNAME = 8;
- private const int COL_APPNAME = 10;
- private const int COL_LOGINNAME = 11;
- private const int COL_SESSION = 12;
- private const int COL_DURATION = 13;
- private const int COL_STARTTIME = 14;
- private const int COL_ENDTIME = 15;
- private const int COL_READS = 16;
- private const int COL_WRITES = 17;
- private const int COL_CPU = 18;
- private const int COL_SEVERITY = 20;
- private const int COL_SUBCLASS = 21;
- private const int COL_OBJID = 22;
- private const int COL_INTDATA = 25;
- private const int COL_CLASS = 27;
- private const int COL_NESTLEVEL = 29;
- private const int COL_STATE = 30;
- private const int COL_ERROR = 31;
- private const int COL_OBJNAME = 34;
- private const int COL_ROWCOUNTS = 48;
- private const int COL_REQUESTID = 49;
- private const int COL_EVENTSEQ = 51;
- private const int COL_OFFSET = 61;
-
- // Max columns in trace format
- private const int TOTAL_COLUMNS = 76;
-
- // Fixed-size column sizes (bytes). Columns not listed are variable-length.
- private static readonly Dictionary FixedColumnSizes = new Dictionary
- {
- { COL_DBID, 4 }, { 4, 4 }, { COL_LINENO, 4 }, { 9, 4 },
- { COL_SESSION, 4 }, { COL_DURATION, 8 }, { COL_STARTTIME, 8 }, { COL_ENDTIME, 8 },
- { COL_READS, 8 }, { COL_WRITES, 8 }, { COL_CPU, 8 }, { 19, 4 },
- { COL_SEVERITY, 4 }, { COL_SUBCLASS, 4 }, { COL_OBJID, 4 }, { 23, 4 },
- { 24, 4 }, { COL_INTDATA, 4 }, { COL_CLASS, 2 }, { 28, 4 },
- { COL_NESTLEVEL, 4 }, { COL_STATE, 4 }, { COL_ERROR, 4 }, { 32, 4 },
- { 33, 4 }, { COL_ROWCOUNTS, 8 }, { COL_REQUESTID, 4 }, { 50, 8 },
- { COL_EVENTSEQ, 8 }, { 52, 8 }, { 53, 8 }, { 54, 16 },
- { 55, 4 }, { 56, 4 }, { 57, 4 }, { 58, 4 },
- { 60, 4 }, { COL_OFFSET, 4 }, { 62, 4 }, { 66, 4 },
- { 68, 16 },
- };
-
- private long _globalSeq;
-
- public TrcFileReader(long startingSeq = 0)
- {
- _globalSeq = startingSeq;
- }
-
- public string[] SupportedExtensions => new[] { ".trc" };
-
- public IEnumerable ReadEvents(string filePath)
- {
- using (var stream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.Read, 65536))
- using (var reader = new BinaryReader(stream, Encoding.Unicode))
- {
- // Read file header
- var header = ReadFileHeader(reader);
- if (header == null)
- yield break;
-
- bool isYukon = header.MajorVersion >= 9;
-
- // Read events
- while (stream.Position < stream.Length - 2)
- {
- TraceEvent evt = null;
- try
- {
- evt = ReadNextEvent(reader, stream, isYukon);
- }
- catch (EndOfStreamException)
- {
- break;
- }
- catch (Exception)
- {
- // Skip malformed events
- continue;
- }
-
- if (evt != null)
- {
- if (evt.Seq == 0)
- evt.Seq = ++_globalSeq;
- yield return evt;
- }
- }
- }
- }
-
- private TrcHeader ReadFileHeader(BinaryReader reader)
- {
- try
- {
- var header = new TrcHeader();
- header.UnicodeTag = reader.ReadUInt16();
- if (header.UnicodeTag != 0xFEFF)
- return null;
-
- header.HeaderSize = reader.ReadUInt16();
- header.TraceVersion = reader.ReadUInt16();
-
- // Provider name: WCHAR[128] = 256 bytes
- byte[] providerBytes = reader.ReadBytes(256);
- header.ProviderName = Encoding.Unicode.GetString(providerBytes).TrimEnd('\0');
-
- // Definition type: WCHAR[64] = 128 bytes
- reader.ReadBytes(128);
-
- header.MajorVersion = reader.ReadByte();
- header.MinorVersion = reader.ReadByte();
- header.BuildNumber = reader.ReadUInt16();
- header.HeaderOptions = reader.ReadUInt32();
-
- // Server name: WCHAR[128] = 256 bytes
- byte[] serverBytes = reader.ReadBytes(256);
- header.ServerName = Encoding.Unicode.GetString(serverBytes).TrimEnd('\0');
-
- // RepeatBase
- header.RepeatBase = reader.ReadUInt16();
-
- return header;
- }
- catch
- {
- return null;
- }
- }
-
- private TraceEvent ReadNextEvent(BinaryReader reader, Stream stream, bool isYukon)
- {
- // Find TRACE_BEGIN_RECORD marker
- while (true)
- {
- if (stream.Position >= stream.Length - 2)
- return null;
-
- ushort marker = reader.ReadUInt16();
-
- if (marker == 0) // EOF
- return null;
-
- // Handle special columns (skip them)
- if (marker >= TRACE_FLUSH && marker <= SQL_TRACE_VERSION && marker != TRACE_BEGIN_RECORD)
- {
- SkipSpecialColumn(reader, marker);
- continue;
- }
-
- if (marker == TRACE_BEGIN_RECORD)
- break;
-
- // Unexpected data — try to resync
- if (marker < 1 || marker > TOTAL_COLUMNS)
- continue;
-
- // It's a column ID outside of a record context — skip
- SkipColumnData(reader, marker);
- }
-
- // Read TRACE_BEGIN_RECORD data
- ushort eventId;
- int recordLength;
-
- if (isYukon)
- {
- // TrcBeginRecordData: USHORT eventId + LONG recordLength
- eventId = reader.ReadUInt16();
- recordLength = reader.ReadInt32();
- }
- else
- {
- // Shiloh: SHORT eventClass + SHORT colCount packed in 4 bytes
- int packed = reader.ReadInt32();
- eventId = (ushort)(packed & 0xFFFF);
- recordLength = -1; // Read until next TRACE_BEGIN_RECORD
- }
-
- var evt = new TraceEvent();
- evt.EventId = eventId;
- evt.EventType = MapEventClass(eventId);
-
- // Read columns within the record
- long recordStart = stream.Position;
- long recordEnd = recordLength > 0 ? recordStart + recordLength - 6 : stream.Length; // -6 for begin record header
-
- while (stream.Position < recordEnd - 1)
- {
- if (stream.Position >= stream.Length - 2)
- break;
-
- ushort colId = reader.ReadUInt16();
-
- if (colId == TRACE_BEGIN_RECORD || colId == 0)
- {
- // We've hit the next event or EOF — back up and return
- stream.Position -= 2;
- break;
- }
-
- // Skip special markers within events
- if (colId >= TRACE_FLUSH && colId <= SQL_TRACE_VERSION)
- {
- break;
- }
-
- if (colId == TRACE_TEXT_FILTERED)
- colId = COL_TEXT;
-
- try
- {
- ReadAndApplyColumn(reader, evt, colId);
- }
- catch
- {
- break;
- }
- }
-
- return evt;
- }
-
- private void ReadAndApplyColumn(BinaryReader reader, TraceEvent evt, int colId)
- {
- if (FixedColumnSizes.TryGetValue(colId, out int fixedSize))
- {
- byte[] data = reader.ReadBytes(fixedSize);
- if (data.Length < fixedSize) return;
- ApplyFixedColumn(evt, colId, data);
- }
- else
- {
- // Variable-length: read length prefix
- byte firstByte = reader.ReadByte();
- int dataLen;
- if (firstByte == 0xFF)
- dataLen = reader.ReadInt32();
- else
- dataLen = firstByte;
-
- if (dataLen <= 0 || dataLen > 10 * 1024 * 1024) // Sanity: max 10MB per column
- return;
-
- byte[] data = reader.ReadBytes(dataLen);
- if (data.Length < dataLen) return;
- ApplyVariableColumn(evt, colId, data);
- }
- }
-
- private void ApplyFixedColumn(TraceEvent evt, int colId, byte[] data)
- {
- switch (colId)
- {
- case COL_DBID:
- evt.DatabaseId = BitConverter.ToInt32(data, 0);
- break;
- case COL_SESSION:
- evt.SessionId = BitConverter.ToInt32(data, 0);
- break;
- case COL_DURATION:
- evt.Duration = BitConverter.ToInt64(data, 0);
- break;
- case COL_STARTTIME:
- evt.StartTime = ReadSqlDateTime(data, 0);
- break;
- case COL_ENDTIME:
- evt.EndTime = ReadSqlDateTime(data, 0);
- break;
- case COL_READS:
- evt.Reads = BitConverter.ToInt64(data, 0);
- break;
- case COL_WRITES:
- evt.Writes = BitConverter.ToInt64(data, 0);
- break;
- case COL_CPU:
- evt.CPU = BitConverter.ToInt64(data, 0);
- break;
- case COL_OBJID:
- evt.ObjectId = BitConverter.ToInt32(data, 0);
- break;
- case COL_NESTLEVEL:
- evt.NestLevel = BitConverter.ToInt32(data, 0);
- break;
- case COL_ERROR:
- evt.Error = BitConverter.ToInt32(data, 0);
- break;
- case COL_SEVERITY:
- evt.Severity = BitConverter.ToInt32(data, 0);
- break;
- case COL_STATE:
- evt.State = BitConverter.ToInt32(data, 0);
- break;
- case COL_SUBCLASS:
- evt.EventSubclass = BitConverter.ToInt32(data, 0);
- break;
- case COL_INTDATA:
- evt.IntegerData = BitConverter.ToInt32(data, 0);
- break;
- case COL_ROWCOUNTS:
- evt.RowCount = BitConverter.ToInt64(data, 0);
- break;
- case COL_REQUESTID:
- evt.RequestId = BitConverter.ToInt32(data, 0);
- break;
- case COL_EVENTSEQ:
- evt.Seq = BitConverter.ToInt64(data, 0);
- break;
- case COL_OFFSET:
- evt.Offset = BitConverter.ToInt32(data, 0);
- break;
- case COL_LINENO:
- evt.LineNumber = BitConverter.ToInt32(data, 0);
- break;
- }
- }
-
- private void ApplyVariableColumn(TraceEvent evt, int colId, byte[] data)
- {
- switch (colId)
- {
- case COL_TEXT:
- evt.TextData = Encoding.Unicode.GetString(data).TrimEnd('\0');
- break;
- case COL_LOGINNAME:
- evt.LoginName = Encoding.Unicode.GetString(data).TrimEnd('\0');
- break;
- case COL_APPNAME:
- evt.ApplicationName = Encoding.Unicode.GetString(data).TrimEnd('\0');
- break;
- case COL_HOSTNAME:
- evt.HostName = Encoding.Unicode.GetString(data).TrimEnd('\0');
- break;
- case COL_NTUSERNAME:
- evt.NTUserName = Encoding.Unicode.GetString(data).TrimEnd('\0');
- break;
- case COL_NTDOMAINNAME:
- evt.NTDomainName = Encoding.Unicode.GetString(data).TrimEnd('\0');
- break;
- case COL_OBJNAME:
- evt.ObjectName = Encoding.Unicode.GetString(data).TrimEnd('\0');
- break;
- }
- }
-
- private static DateTime? ReadSqlDateTime(byte[] data, int offset)
- {
- if (data.Length < offset + 8) return null;
- try
- {
- // SQL Server datetime: 4-byte days since 1900-01-01 + 4-byte 300ths of second
- int days = BitConverter.ToInt32(data, offset);
- uint ticks = BitConverter.ToUInt32(data, offset + 4);
- DateTime baseDate = new DateTime(1900, 1, 1);
- return baseDate.AddDays(days).AddMilliseconds(ticks * 10.0 / 3.0);
- }
- catch
- {
- return null;
- }
- }
-
- private void SkipSpecialColumn(BinaryReader reader, ushort marker)
- {
- // Special columns have variable data — read and discard
- try
- {
- switch (marker)
- {
- case SQL_TRACE_VERSION:
- case TRACED_OPTIONS:
- case TRACE_START:
- case TRACE_STOP:
- // These have a 4-byte length followed by data
- int len = reader.ReadInt32();
- if (len > 0 && len < 1024 * 1024)
- reader.ReadBytes(len);
- break;
- case TRACED_EVENTS:
- case TRACED_FILTERS:
- // Variable-length: read length prefix
- byte fb = reader.ReadByte();
- int dlen = fb == 0xFF ? reader.ReadInt32() : fb;
- if (dlen > 0 && dlen < 1024 * 1024)
- reader.ReadBytes(dlen);
- break;
- case TRACE_FLUSH:
- // No data
- break;
- }
- }
- catch { }
- }
-
- private void SkipColumnData(BinaryReader reader, int colId)
- {
- try
- {
- if (FixedColumnSizes.TryGetValue(colId, out int size))
- {
- reader.ReadBytes(size);
- }
- else
- {
- byte fb = reader.ReadByte();
- int len = fb == 0xFF ? reader.ReadInt32() : fb;
- if (len > 0 && len < 10 * 1024 * 1024)
- reader.ReadBytes(len);
- }
- }
- catch { }
- }
-
- private static TraceEventType MapEventClass(int eventId)
- {
- if (Enum.IsDefined(typeof(TraceEventType), eventId))
- return (TraceEventType)eventId;
- return TraceEventType.Unknown;
- }
-
- private class TrcHeader
- {
- public ushort UnicodeTag;
- public ushort HeaderSize;
- public ushort TraceVersion;
- public string ProviderName;
- public byte MajorVersion;
- public byte MinorVersion;
- public ushort BuildNumber;
- public uint HeaderOptions;
- public string ServerName;
- public ushort RepeatBase;
- }
- }
-}
diff --git a/TraceEventImporter/TraceEventImporterPlugin.cs b/TraceEventImporter/TraceEventImporterPlugin.cs
index 5d7679ed..c747c962 100644
--- a/TraceEventImporter/TraceEventImporterPlugin.cs
+++ b/TraceEventImporter/TraceEventImporterPlugin.cs
@@ -16,9 +16,10 @@
namespace TraceEventImporter
{
///
- /// SqlNexus importer plugin that reads .trc and .xel trace files, normalizes SQL text,
+ /// SqlNexus importer plugin that reads .xel trace files, normalizes SQL text,
/// computes HashIDs, and bulk-loads data into the ReadTrace schema.
- /// Coexists with the existing ReadTraceNexusImporter (which shells out to ReadTrace.exe).
+ /// Coexists with the existing ReadTraceNexusImporter (which handles .trc files
+ /// by shelling out to ReadTrace.exe).
/// Discovered automatically by SqlNexus via DLL reflection.
///
public class TraceEventImporterPlugin : INexusImporter, INexusProgressReporter
@@ -58,7 +59,9 @@ public TraceEventImporterPlugin()
public string Name => "Trace Event Importer (Managed)";
- public string[] SupportedMasks => new[] { "*.trc", "*pssdiag*.xel", "*LogScout*.xel" };
+ // .trc files are handled by ReadTraceNexusImporter (ReadTrace.exe).
+ // This importer handles XEL files only.
+ public string[] SupportedMasks => new[] { "*pssdiag*.xel", "*LogScout*.xel" };
public Dictionary Options => _options;
@@ -125,8 +128,7 @@ public bool DoImport()
return false;
}
- string[] allFiles = Directory.GetFiles(dir, mask);
- string[] files = allFiles.Where(f => !SkipFile(f)).OrderBy(f => f).ToArray();
+ string[] files = Directory.GetFiles(dir, mask).OrderBy(f => f).ToArray();
if (files.Length == 0)
{
@@ -135,13 +137,9 @@ public bool DoImport()
return false;
}
- int excluded = allFiles.Length - files.Length;
- if (excluded > 0)
- LogMessage($"TraceEventImporter: Excluded {excluded} file(s) (log_NNN.trc / *_blk.trc).");
-
LogMessage($"TraceEventImporter: Processing {files.Length} file(s)...");
- // 2. Deploy schema (always ensure tables exist; option controls whether to drop first)
+ // 2. Deploy schema
LogMessage("TraceEventImporter: Creating ReadTrace schema and tables...");
DeploySchema();
@@ -157,14 +155,9 @@ public bool DoImport()
{
if (_cancelled) break;
- string ext = Path.GetExtension(file).ToLowerInvariant();
LogMessage($"TraceEventImporter: Reading {Path.GetFileName(file)}...");
- ITraceEventReader reader;
- if (ext == ".xel")
- reader = new XelFileReader(globalSeq);
- else
- reader = new TrcFileReader(globalSeq);
+ ITraceEventReader reader = new XelFileReader(globalSeq);
long fileFirstSeq = long.MaxValue;
long fileLastSeq = 0;
@@ -192,7 +185,6 @@ public bool DoImport()
fileLastTime = evt.StartTime;
}
- // Report progress periodically
if (fileEventsRead % 10000 == 0)
{
_currentPosition = fileEventsRead;
@@ -200,7 +192,6 @@ public bool DoImport()
}
}
- // Record trace file info
if (fileEventsRead > 0)
{
writer.WriteTraceFile(fileFirstSeq, fileLastSeq, fileFirstTime, fileLastTime, fileEventsRead, Path.GetFileName(file));
@@ -222,12 +213,10 @@ public bool DoImport()
// 5. Write all data
LogMessage("TraceEventImporter: Writing data to database...");
- // Metadata
writer.WriteMiscInfo("SchemaVersion", "3.0");
writer.WriteMiscInfo("LoadDateTime", DateTime.UtcNow.ToString("yyyy-MM-dd HH:mm:ss"));
writer.WriteMiscInfo("ImporterVersion", "TraceEventImporter 1.0");
- // Reference data
var tracedEvents = store.GetTracedEventIds();
writer.WriteTracedEvents(tracedEvents);
var uniqueAppNames = store.GetUniqueAppNames();
@@ -237,13 +226,11 @@ public bool DoImport()
var procedureNames = store.GetProcedureNames();
writer.WriteProcedureNames(procedureNames);
- // Unique text
var uniqueBatches = store.GetUniqueBatches();
writer.WriteUniqueBatches(uniqueBatches);
var uniqueStatements = store.GetUniqueStatements();
writer.WriteUniqueStatements(uniqueStatements);
- // Fact data
writer.WriteConnections(processor.Connections);
writer.WriteBatches(processor.Batches);
writer.WriteStatements(processor.Statements);
@@ -260,7 +247,6 @@ public bool DoImport()
_totalRowsInserted = writer.TotalRowsInserted;
- // Log per-table row counts
LogMessage("TraceEventImporter: --- Row counts per table ---");
LogMessage($"TraceEventImporter: tblTracedEvents: {tracedEvents.Count()}");
LogMessage($"TraceEventImporter: tblUniqueAppNames: {uniqueAppNames.Count()}");
@@ -278,7 +264,7 @@ public bool DoImport()
LogMessage($"TraceEventImporter: Total rows inserted: {_totalRowsInserted}");
}
- // 7. Post-load fixups (indexes, ConnSeq linking, ParentStmtSeq)
+ // 7. Post-load fixups
LogMessage("TraceEventImporter: Running post-load fixups...");
RunPostLoadFixups();
@@ -342,7 +328,6 @@ private void RunPostLoadFixups()
private void ExecuteSqlScript(string script)
{
- // Split on GO statements (same approach as CSql in NexusInterfaces)
string[] batches = Regex.Split(script, @"^\s*GO\s*$", RegexOptions.Multiline | RegexOptions.IgnoreCase);
int executedCount = 0;
@@ -380,22 +365,6 @@ private static string LoadEmbeddedResource(string resourceName)
}
}
- private bool SkipFile(string fullFileName)
- {
- string name = Path.GetFileName(fullFileName);
- if (string.IsNullOrEmpty(name)) return false;
-
- // Skip blocked trace files
- if (name.EndsWith("_blk.trc", StringComparison.OrdinalIgnoreCase))
- return true;
-
- // Skip internal log trace files
- if (Regex.IsMatch(name, @"^log_\d+\.trc$", RegexOptions.IgnoreCase))
- return true;
-
- return false;
- }
-
private void LogMessage(string msg)
{
if (_logger != null)
From a42596530958689418327f8d9ea92d20a34f7258 Mon Sep 17 00:00:00 2001
From: JosephPilov-MSFT <23519517+PiJoCoder@users.noreply.github.com>
Date: Wed, 29 Apr 2026 20:41:18 -0500
Subject: [PATCH 07/12] #513 fix(TraceEventImporter): fix Unique
Batch/Statement Details report returning no data
tblUniqueBatches.Seq and tblUniqueStatements.Seq must store the global
event sequence of the first occurrence of each unique batch/statement,
matching the value in tblBatches.BatchSeq / tblStatements.StmtSeq.
Previously, UniqueStore used auto-incremented local counters
(++_uniqueBatchSeq / ++_uniqueStmtSeq) which produced small sequential
values (1, 2, 3...) that never matched the large XEL event sequence
numbers stored as BatchSeq/StmtSeq in the detail tables.
This caused spReporter_ExampleBatchDetails (and related report procs)
to return zero rows because the join `tblUniqueBatches.Seq = tblBatches.BatchSeq`
could never be satisfied.
Changes:
- UniqueStore.TryAddBatch: add batchSeq parameter; use it as Seq
- UniqueStore.TryAddStatement: add stmtSeq parameter; use it as Seq
- EventProcessor.HandleBatchCompleted: pass batchSeq to TryAddBatch
- EventProcessor.HandleStatementCompleted: declare stmtSeq before use,
pass it to TryAddStatement
- Remove unused _uniqueBatchSeq and _uniqueStmtSeq counters from UniqueStore
---
TraceEventImporter/Processing/EventProcessor.cs | 8 ++++----
TraceEventImporter/Processing/UniqueStore.cs | 8 ++++----
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/TraceEventImporter/Processing/EventProcessor.cs b/TraceEventImporter/Processing/EventProcessor.cs
index 6983b874..65b682a3 100644
--- a/TraceEventImporter/Processing/EventProcessor.cs
+++ b/TraceEventImporter/Processing/EventProcessor.cs
@@ -7,7 +7,7 @@ namespace TraceEventImporter.Processing
{
///
/// Correlates Starting↔Completed events per session+request, normalizes SQL text,
- /// computes HashIDs, and produces rows for tblBatches, tblStatements, tblConnections,
+ /// computes HashID, and produces rows for tblBatches, tblStatements, tblConnections,
/// and tblInterestingEvents.
///
public class EventProcessor
@@ -190,7 +190,7 @@ private void HandleBatchCompleted(TraceEvent evt)
long hashId = HashComputer.ComputeHash(normText, specialProcId);
// Add to unique store — store original textData but normalized inner SQL
- _store.TryAddBatch(hashId, textData, normText, specialProcId);
+ _store.TryAddBatch(batchSeq, hashId, textData, normText, specialProcId);
// Track procedure name
if (isRpc && !string.IsNullOrEmpty(objectName))
@@ -273,12 +273,12 @@ private void HandleStatementCompleted(TraceEvent evt)
string normText = SqlTextNormalizer.Normalize(textData);
long hashId = HashComputer.ComputeHash(normText);
- _store.TryAddStatement(hashId, textData, normText);
-
// StmtSeq = StartSeq if available, otherwise EndSeq (matches ReadTrace.exe:
// Row.StmtSeq_Value = (StartSeq_Status == OK) ? StartSeq : EndSeq)
long stmtSeq = pending?.StartSeq ?? evt.Seq;
+ _store.TryAddStatement(stmtSeq, hashId, textData, normText);
+
// Find the parent batch using the in-flight batch's starting sequence.
// This is the key difference from the old approach: ReadTrace maintains
// CurBatchStartSeq as live session state set when BatchStarting arrives,
diff --git a/TraceEventImporter/Processing/UniqueStore.cs b/TraceEventImporter/Processing/UniqueStore.cs
index 9a2705d9..e9a7e7b1 100644
--- a/TraceEventImporter/Processing/UniqueStore.cs
+++ b/TraceEventImporter/Processing/UniqueStore.cs
@@ -23,14 +23,14 @@ public class UniqueStore
#region Unique Batches
- public bool TryAddBatch(long hashId, string origText, string normText, byte specialProcId)
+ public bool TryAddBatch(long batchSeq, long hashId, string origText, string normText, byte specialProcId)
{
if (_uniqueBatches.ContainsKey(hashId))
return false;
_uniqueBatches[hashId] = new UniqueBatch
{
- Seq = ++_uniqueBatchSeq,
+ Seq = batchSeq, // first-occurrence BatchSeq (matches tblBatches.BatchSeq)
HashID = hashId,
OrigText = origText ?? "",
NormText = normText ?? "",
@@ -45,14 +45,14 @@ public bool TryAddBatch(long hashId, string origText, string normText, byte spec
#region Unique Statements
- public bool TryAddStatement(long hashId, string origText, string normText)
+ public bool TryAddStatement(long stmtSeq, long hashId, string origText, string normText)
{
if (_uniqueStatements.ContainsKey(hashId))
return false;
_uniqueStatements[hashId] = new UniqueStatement
{
- Seq = ++_uniqueStmtSeq,
+ Seq = stmtSeq, // first-occurrence StmtSeq (matches tblStatements.StmtSeq)
HashID = hashId,
OrigText = origText,
NormText = normText
From 0821be7250a225ec4cb0b5c03d91b573a62d353c Mon Sep 17 00:00:00 2001
From: JosephPilov-MSFT <23519517+PiJoCoder@users.noreply.github.com>
Date: Wed, 6 May 2026 10:20:56 -0500
Subject: [PATCH 08/12] #513 Normalize currency-prefixed numeric parameters in
SqlTextNormalizer
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Added IsCurrencySymbol() helper covering the Unicode Currency Symbols
block (\u20A0-\u20CF) plus the Latin-1 extras $, ¢, £, ¤, ¥ — all
expressed as \uXXXX escapes to avoid encoding fragility.
In the main normalization loop, a currency symbol immediately followed
by a digit is now skipped before the numeric literal handler runs, so
values like $380, €50, or ¥100 are normalized to {##} the same as a
bare number. The guard on the numeric literal section is also relaxed
to allow digits preceded by a currency symbol, covering the case where
$ is otherwise treated as an identifier character.
String/character data is unaffected: the currency handler requires
char.IsDigit on the next character, and string literals are consumed
and continued well before this code is reached.
---
.../Normalization/SqlTextNormalizer.cs | 59 ++++++++++++++++++-
1 file changed, 58 insertions(+), 1 deletion(-)
diff --git a/TraceEventImporter/Normalization/SqlTextNormalizer.cs b/TraceEventImporter/Normalization/SqlTextNormalizer.cs
index 15f70415..57cd7327 100644
--- a/TraceEventImporter/Normalization/SqlTextNormalizer.cs
+++ b/TraceEventImporter/Normalization/SqlTextNormalizer.cs
@@ -148,8 +148,16 @@ public static string Normalize(string sql)
continue;
}
+ // --- Currency-prefixed numeric literal: $380, \u00A5100, \u20AC50, \u00A320, etc. ---
+ if (IsCurrencySymbol(c) && i + 1 < len && char.IsDigit(sql[i + 1]))
+ {
+ i++; // skip the currency symbol
+ c = sql[i];
+ // fall through to numeric literal handling below
+ }
+
// --- Numeric literal ---
- if (char.IsDigit(c) && (i == 0 || !IsIdentChar(sql[i - 1])))
+ if (char.IsDigit(c) && (i == 0 || !IsIdentChar(sql[i - 1]) || IsCurrencySymbol(sql[i - 1])))
{
bool hasDecimal = false;
while (i < len && char.IsDigit(sql[i]))
@@ -272,6 +280,55 @@ private static bool IsIdentChar(char c)
return char.IsLetterOrDigit(c) || c == '_' || c == '#' || c == '$';
}
+ /// <summary>
+ /// Returns true if the character is a currency symbol (dollar, yen/yuan, euro, pound, etc.)
+ /// that can appear as a prefix before a numeric value in SQL parameters.
+ /// </summary>
+ private static bool IsCurrencySymbol(char c)
+ {
+ // \u0024 $ dollar (USD, CAD, AUD, ...)
+ // \u00A2 ¢ cent
+ // \u00A3 £ pound sterling (GBP)
+ // \u00A4 ¤ generic currency sign
+ // \u00A5 ¥ yen / yuan (JPY, CNY)
+ // \u20A0-\u20CF Unicode Currency Symbols block:
+ // \u20A0 ₠ euro-currency sign
+ // \u20A1 ₡ colón (CRC)
+ // \u20A2 ₢ cruzeiro (BRL legacy)
+ // \u20A3 ₣ franc (CHF legacy)
+ // \u20A4 ₤ lira sign
+ // \u20A5 ₥ mill sign
+ // \u20A6 ₦ naira (NGN)
+ // \u20A7 ₧ peseta (ESP)
+ // \u20A8 ₨ rupee sign
+ // \u20A9 ₩ won (KRW)
+ // \u20AA ₪ shekel (ILS)
+ // \u20AB ₫ dong (VND)
+ // \u20AC € euro (EUR)
+ // \u20AD ₭ kip (LAK)
+ // \u20AE ₮ tögrög (MNT)
+ // \u20AF ₯ drachma (GRD)
+ // \u20B0 ₰ pfennig
+ // \u20B1 ₱ peso (PHP)
+ // \u20B2 ₲ guaraní (PYG)
+ // \u20B3 ₳ austral (ARS legacy)
+ // \u20B4 ₴ hryvnia (UAH)
+ // \u20B5 ₵ cedi (GHS)
+ // \u20B6 ₶ livre tournois
+ // \u20B7 ₷ spesmilo
+ // \u20B8 ₸ tenge (KZT)
+ // \u20B9 ₹ rupee (INR)
+ // \u20BA ₺ lira (TRY)
+ // \u20BB ₻ nordic mark
+ // \u20BC ₼ manat (AZN)
+ // \u20BD ₽ ruble (RUB)
+ // \u20BE ₾ lari (GEL)
+ // \u20BF ₿ bitcoin
+ // \u20C0-\u20CF reserved for future currency symbols
+ return c == '\u0024' || c == '\u00A2' || c == '\u00A3' || c == '\u00A4' || c == '\u00A5' ||
+ (c >= '\u20A0' && c <= '\u20CF');
+ }
+
private static bool IsGuidLiteral(string sql, int pos)
{
// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} = 38 chars
From deb70c0ff52fbc2b513fef71ce160fb0f53d9e1c Mon Sep 17 00:00:00 2001
From: JosephPilov-MSFT <23519517+PiJoCoder@users.noreply.github.com>
Date: Wed, 6 May 2026 18:14:13 -0500
Subject: [PATCH 09/12] #513 fix(importer): add TraceEventImporter to
sqlnexus.csproj and harden DLL discovery diagnostics
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
- Add TraceEventImporter ProjectReference to sqlnexus.csproj so MSBuild
copies TraceEventImporter.dll (and its dependencies) to the output
directory; without this the Import menu entry was never created
- Fix broken pre-existing TraceEventImporter fragment in sqlnexus.csproj
(placeholder GUID, missing closing tag) that caused
an MSBuild XML parse error; preserve the valid ErrorLogImporter reference
that was nested inside it
fmImport.cs — harden EnumImportersFromDirectory diagnostics:
- Wrap GetExportedTypes() in its own try/catch; ReflectionTypeLoadException
now logs each individual LoaderException (exact missing dependency name)
instead of silently terminating the scan for remaining DLLs
- Wrap CreateInstance/cast in try/catch so an InvalidCastException from a
mismatched NexusInterfaces version is logged per-type rather than crashing
- Distinguish "expected" DLLs (those sqlnexus.exe directly references at
compile time via GetReferencedAssemblies()) from unrelated support libs;
load failures for expected DLLs escalate to MessageOptions.Both (log +
status bar) instead of Silent
- Add per-DLL "loaded but registered no importers" warning for expected DLLs
(catches interface version mismatches, wrong SNK key, etc.)
- Add post-scan loop over referenced assemblies containing "Import" to log
any expected importer DLL absent from the scanned directory (Silent)
- Replace hardcoded _expectedImporterDlls string[] with a
Lazy> built from Assembly.GetReferencedAssemblies() so
the list stays in sync automatically when new ProjectReferences are added
- Add final post-scan check in EnumImporters() that warns at Both level if
TraceEventImporter or ReadTraceNexusImporter never registered, naming the
exact DLL and startup path to check
---
sqlnexus/fmImport.cs | 147 +++++++++++++++++++++++++++++++++++++--
sqlnexus/sqlnexus.csproj | 7 +-
2 files changed, 144 insertions(+), 10 deletions(-)
diff --git a/sqlnexus/fmImport.cs b/sqlnexus/fmImport.cs
index f7610dee..cac165a9 100644
--- a/sqlnexus/fmImport.cs
+++ b/sqlnexus/fmImport.cs
@@ -1,4 +1,4 @@
-using System;
+using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
@@ -413,6 +413,34 @@ public void EnumImporters()
EnumImportersFromDirectory(Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData) + @"\SqlNexus\Importers");
EnumImportersFromDirectory(Application.StartupPath);
EnforceTraceImporterExclusivity();
+
+ // Final check: warn for any expected importer that never made it into the menu,
+ // regardless of which directory was scanned. This is the most user-visible signal.
+ var registeredNames = new HashSet(StringComparer.OrdinalIgnoreCase);
+ foreach (ToolStripMenuItem item in tsiImporters.DropDownItems)
+ {
+ INexusImporter prod = item.Tag as INexusImporter;
+ if (prod != null)
+ registeredNames.Add(prod.Name);
+ }
+
+ // Map DLL base name → the Name the importer registers itself under
+ var expectedDllToName = new Dictionary(StringComparer.OrdinalIgnoreCase)
+ {
+ { "TraceEventImporter", TRACEEVENT_IMPORTER_NAME },
+ { "ReadTraceNexusImporter", READTRACE_IMPORTER_NAME },
+ };
+
+ foreach (var kvp in expectedDllToName)
+ {
+ if (!registeredNames.Contains(kvp.Value))
+ {
+ MainForm.LogMessage(string.Format(
+ "WARNING: Importer '{0}' (from {1}.dll) did not register in the Import menu. " +
+ "Check the application log for load errors, or verify that {1}.dll is present in '{2}'.",
+ kvp.Value, kvp.Key, Application.StartupPath), MessageOptions.Both);
+ }
+ }
}
///
@@ -509,15 +537,29 @@ private List OrderedImporterFiles(string[] files)
return OrderedImporters;
}
+ // The set of assembly names that sqlnexus.exe directly references at compile time.
+ // Built once from the executing assembly's manifest so it stays in sync automatically:
+ // any new importer added as a ProjectReference to sqlnexus.csproj is immediately
+ // included without any code change here.
+ private static readonly Lazy> _referencedByHost = new Lazy>(() =>
+ {
+ var names = new HashSet(StringComparer.OrdinalIgnoreCase);
+ foreach (AssemblyName asmName in Assembly.GetExecutingAssembly().GetReferencedAssemblies())
+ names.Add(asmName.Name);
+ return names;
+ });
+
private void EnumImportersFromDirectory(string importerDirectory)
{
if (!Directory.Exists(importerDirectory))
return;
-
+
string[] Files = Directory.GetFiles(importerDirectory, "*.DLL");
List OrderedFiles = OrderedImporterFiles(Files);
- // List of option names
+ // Track which expected DLL names were actually seen in this directory so that
+ // EnumImporters() can warn about any that are present on disk but failed to register.
+ var seenExpected = new HashSet(StringComparer.OrdinalIgnoreCase);
foreach (string file in OrderedFiles)
{
@@ -529,20 +571,55 @@ private void EnumImportersFromDirectory(string importerDirectory)
MainForm.LogMessage(String.Format(Properties.Resources.Msg_NativeImage, file));
continue;
}
+
+ string dllBaseName = Path.GetFileNameWithoutExtension(file);
+ // A DLL is "expected" if sqlnexus.exe directly references it at compile time
+ // (i.e. it was added as a ProjectReference). No list to maintain here.
+ bool isExpected = _referencedByHost.Value.Contains(dllBaseName);
+
Assembly Assem;
try
{
Assem = Assembly.LoadFile(file);
+ }
+ catch (Exception ex)
+ {
+ string msg = string.Format("Assembly '{0}' could not be loaded: {1}", Path.GetFileName(file), ex.Message);
+ // Use a visible log level when a known importer DLL fails to load
+ MainForm.LogMessage(msg, isExpected ? MessageOptions.Both : MessageOptions.Silent);
+ continue;
+ }
-
+ Type[] typs;
+ try
+ {
+ typs = Assem.GetExportedTypes();
+ }
+ catch (ReflectionTypeLoadException rtle)
+ {
+ // Log the loader exceptions — these reveal exactly which dependency is missing
+ var loaderMsgs = rtle.LoaderExceptions != null
+ ? string.Join("; ", rtle.LoaderExceptions
+ .Where(le => le != null)
+ .Select(le => le.Message))
+ : rtle.Message;
+ string msg = string.Format(
+ "Assembly '{0}' loaded but type inspection failed (missing dependency?): {1}",
+ Path.GetFileName(file), loaderMsgs);
+ MainForm.LogMessage(msg, isExpected ? MessageOptions.Both : MessageOptions.Silent);
+ continue;
}
catch (Exception ex)
{
- MainForm.LogMessage("Assembly " + file + " could not be used as an importer: " + ex.Message, MessageOptions.Silent);
+ string msg = string.Format(
+ "Assembly '{0}' loaded but GetExportedTypes() failed: {1}",
+ Path.GetFileName(file), ex.Message);
+ MainForm.LogMessage(msg, isExpected ? MessageOptions.Both : MessageOptions.Silent);
continue;
}
- Type[] typs = Assem.GetExportedTypes();
+ int importersRegisteredFromThisDll = 0;
+
foreach (Type typ in typs)
{
//Ignore abstract classes
@@ -560,7 +637,23 @@ private void EnumImportersFromDirectory(string importerDirectory)
//If we get in here, the Class implements the interface, so add it to the list
//and bail
- INexusImporter prod = (INexusImporter)Assem.CreateInstance(typ.FullName, true);
+ INexusImporter prod;
+ try
+ {
+ prod = (INexusImporter)Assem.CreateInstance(typ.FullName, true);
+ }
+ catch (Exception ex)
+ {
+ MainForm.LogMessage(string.Format(
+ "Failed to instantiate or cast '{0}' from '{1}': {2}",
+ typ.FullName, Path.GetFileName(file), ex.Message),
+ isExpected ? MessageOptions.Both : MessageOptions.Silent);
+ continue;
+ }
+
+ importersRegisteredFromThisDll++;
+ if (isExpected)
+ seenExpected.Add(dllBaseName);
prod.StatusChanged += new System.EventHandler(this.ImportStatusChanged);
if (prod is INexusProgressReporter)
@@ -664,6 +757,46 @@ private void EnumImportersFromDirectory(string importerDirectory)
};
}
}
+
+ // Log if a DLL that looks like an importer loaded and reflected cleanly but
+ // registered nothing — this catches mismatched interface versions, wrong SNK, etc.
+ if (importersRegisteredFromThisDll == 0 && isExpected)
+ {
+ MainForm.LogMessage(string.Format(
+ "WARNING: '{0}' loaded successfully but registered no INexusImporter entries. " +
+ "The importer will not appear in the Import menu. " +
+ "Check that the DLL targets the correct NexusInterfaces version.",
+ Path.GetFileName(file)), MessageOptions.Both);
+ }
+ else if (importersRegisteredFromThisDll > 0)
+ {
+ MainForm.LogMessage(string.Format(
+ "'{0}' registered {1} importer(s).", Path.GetFileName(file), importersRegisteredFromThisDll),
+ MessageOptions.Silent);
+ }
+ }
+
+ // After scanning, log any compile-time-referenced DLL that wasn't even found on
+ // disk in this directory. Only check DLLs that are both referenced AND absent;
+ // support libraries (Azure.Core, System.Memory, etc.) are expected not to be here
+ // when scanning the AppData importers directory, so we limit to DLLs whose names
+ // suggest they are importers (contain "Import") to avoid log noise.
+ var foundOnDiskNames = new HashSet(
+ OrderedFiles.Select(f => Path.GetFileNameWithoutExtension(f)),
+ StringComparer.OrdinalIgnoreCase);
+
+ foreach (string refName in _referencedByHost.Value)
+ {
+ if (refName.IndexOf("Import", StringComparison.OrdinalIgnoreCase) < 0)
+ continue; // skip non-importer support libraries
+
+ if (!foundOnDiskNames.Contains(refName))
+ {
+ MainForm.LogMessage(string.Format(
+ "Referenced importer DLL '{0}.dll' was not found in '{1}'. " +
+ "The importer will not appear in the Import menu.",
+ refName, importerDirectory), MessageOptions.Silent);
+ }
}
// Add the SQLDiag/AlwaysOn XEL option under Importers with sub-items
diff --git a/sqlnexus/sqlnexus.csproj b/sqlnexus/sqlnexus.csproj
index 2ecd5598..3210aa76 100644
--- a/sqlnexus/sqlnexus.csproj
+++ b/sqlnexus/sqlnexus.csproj
@@ -870,6 +870,10 @@
{ccf72846-1645-4548-8e66-a2ffce83692d}
LinuxPerfImporter
+
+ {08FCB9FF-C633-4B28-947B-39D553091600}
+ TraceEventImporter
+
{2217E9CA-442E-46C5-AB6C-7A46AE41A22C}
NexusInterfaces
@@ -886,9 +890,6 @@
{8DB02D0C-68F3-4762-9BC9-AA3DF2B91F4B}
RowsetImportEngine
-
- {B7A3C2D1-E4F5-4A6B-8C9D-0E1F2A3B4C5D}
- TraceEventImporter
{B3A4C5D6-E7F8-4A9B-0C1D-2E3F4A5B6C7D}
ErrorLogImporter
From 3ff32c38d6f98aea5f5bd62a47863cda89289caa Mon Sep 17 00:00:00 2001
From: JosephPilov-MSFT <23519517+PiJoCoder@users.noreply.github.com>
Date: Wed, 6 May 2026 21:58:41 -0500
Subject: [PATCH 10/12] #513 CodeQL and style fixes across Normalization,
Readers, and Processing
SpExecuteSqlExtractor.cs
- Combined nested `if` into single condition for N'...' unicode literal
detection; moved comment above the block
SqlTextNormalizer.cs
- Binary literal (0xHEX): folded inner identifier-boundary guard into the
outer `if` condition; added clarifying comment
- IsHexDigit: split three-range return onto separate lines to resolve
CodeQL "complex condition: too many logical operations" warning
- IsShowplanVariable: replaced `string prefix = ""` + `+=` concatenation
with `StringBuilder` per CodeQL recommendation; eliminated intermediate
`prefix` variable by calling `.Length` and `.ToString()` directly on the
builder
XelFileReader.cs
- Replaced all four empty `catch { }` blocks in the Field/Action helpers
with `catch (Exception ex)` + `Debug.WriteLine` per CodeQL
"empty catch block" rule; `Debug.WriteLine` is used (vs `Trace`) so
messages are stripped from Release builds and always visible in the VS
Output window when debugging without requiring a configured TraceListener
Aggregator.cs
- `FindTimeInterval`: moved early-return condition to its own line for
consistency with the rest of the file's brace style
---
.../Normalization/SpExecuteSqlExtractor.cs | 15 ++++----
.../Normalization/SqlTextNormalizer.cs | 36 ++++++++++---------
TraceEventImporter/Processing/Aggregator.cs | 3 +-
TraceEventImporter/Readers/XelFileReader.cs | 20 ++++++++---
4 files changed, 44 insertions(+), 30 deletions(-)
diff --git a/TraceEventImporter/Normalization/SpExecuteSqlExtractor.cs b/TraceEventImporter/Normalization/SpExecuteSqlExtractor.cs
index 18200d1e..5f1deda3 100644
--- a/TraceEventImporter/Normalization/SpExecuteSqlExtractor.cs
+++ b/TraceEventImporter/Normalization/SpExecuteSqlExtractor.cs
@@ -52,15 +52,14 @@ public static string TryExtractInnerSql(string textData)
// Look for N'...' or '...' pattern
while (i < len)
{
- // N'...' unicode string literal
- if (i < len - 1 && (textData[i] == 'N' || textData[i] == 'n') && textData[i + 1] == '\'')
+ // N'...' unicode string literal — check we're past the proc name
+ if (i < len - 1
+ && (textData[i] == 'N' || textData[i] == 'n')
+ && textData[i + 1] == '\''
+ && i > 0
+ && IsAfterProcName(textData, i))
{
- // Check if this is after the proc name (not part of it)
- // by verifying we've passed at least one space/comma
- if (i > 0 && IsAfterProcName(textData, i))
- {
- return ExtractQuotedString(textData, i + 2);
- }
+ return ExtractQuotedString(textData, i + 2);
}
// '...' regular string literal
diff --git a/TraceEventImporter/Normalization/SqlTextNormalizer.cs b/TraceEventImporter/Normalization/SqlTextNormalizer.cs
index 57cd7327..9c1451ac 100644
--- a/TraceEventImporter/Normalization/SqlTextNormalizer.cs
+++ b/TraceEventImporter/Normalization/SqlTextNormalizer.cs
@@ -82,18 +82,16 @@ public static string Normalize(string sql)
continue;
}
- // --- Binary literal: 0xHEX ---
- if (c == '0' && i + 1 < len && (sql[i + 1] == 'x' || sql[i + 1] == 'X'))
+ // --- Binary literal: 0xHEX — only when not part of an identifier ---
+ if (c == '0' && i + 1 < len
+ && (sql[i + 1] == 'x' || sql[i + 1] == 'X')
+ && (i == 0 || !IsIdentChar(sql[i - 1])))
{
- // Ensure it's not part of an identifier
- if (i == 0 || !IsIdentChar(sql[i - 1]))
- {
- i += 2;
- while (i < len && IsHexDigit(sql[i]))
- i++;
- AppendWithSpace(result, "{BS}");
- continue;
- }
+ i += 2;
+ while (i < len && IsHexDigit(sql[i]))
+ i++;
+ AppendWithSpace(result, "{BS}");
+ continue;
}
// --- GUID literal: {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} ---
@@ -272,7 +270,9 @@ private static void SkipStringLiteral(string sql, ref int i)
private static bool IsHexDigit(char c)
{
- return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F');
+ return (c >= '0' && c <= '9')
+ || (c >= 'a' && c <= 'f')
+ || (c >= 'A' && c <= 'F');
}
private static bool IsIdentChar(char c)
@@ -346,21 +346,23 @@ private static bool IsShowplanVariable(string sql, int pos)
// Matches [ExprNNNN] or [BMKNNNN] patterns
if (pos + 5 >= sql.Length) return false;
int i = pos + 1;
- string prefix = "";
+ var prefixBuilder = new StringBuilder();
while (i < sql.Length && char.IsLetter(sql[i]))
{
- prefix += sql[i];
+ prefixBuilder.Append(sql[i]);
i++;
}
- if (prefix.Length == 0) return false;
+ if (prefixBuilder.Length == 0)
+ return false;
- string upper = prefix.ToUpperInvariant();
+ string upper = prefixBuilder.ToString().ToUpperInvariant();
if (upper != "EXPR" && upper != "BMK" && upper != "UNION" && upper != "CONST")
return false;
// Must be followed by digits then ']'
- if (i >= sql.Length || !char.IsDigit(sql[i])) return false;
+ if (i >= sql.Length || !char.IsDigit(sql[i]))
+ return false;
while (i < sql.Length && char.IsDigit(sql[i])) i++;
return (i < sql.Length && sql[i] == ']');
}
diff --git a/TraceEventImporter/Processing/Aggregator.cs b/TraceEventImporter/Processing/Aggregator.cs
index b5c3dcf5..362a99e9 100644
--- a/TraceEventImporter/Processing/Aggregator.cs
+++ b/TraceEventImporter/Processing/Aggregator.cs
@@ -188,7 +188,8 @@ private void ComputeStmtAggs(List statements)
private int FindTimeInterval(DateTime? time)
{
- if (!time.HasValue || TimeIntervals.Count == 0) return 0;
+ if (!time.HasValue || TimeIntervals.Count == 0)
+ return 0;
DateTime t = time.Value;
foreach (var ti in TimeIntervals)
{
diff --git a/TraceEventImporter/Readers/XelFileReader.cs b/TraceEventImporter/Readers/XelFileReader.cs
index 5d1cdffd..d92f95cb 100644
--- a/TraceEventImporter/Readers/XelFileReader.cs
+++ b/TraceEventImporter/Readers/XelFileReader.cs
@@ -223,7 +223,10 @@ private static string GetFieldString(IXEvent xe, string fieldName)
if (xe.Fields != null && xe.Fields.TryGetValue(fieldName, out object val) && val != null)
return val.ToString();
}
- catch { }
+ catch (Exception ex)
+ {
+ System.Diagnostics.Debug.WriteLine($"[XelFileReader] GetFieldString('{fieldName}'): {ex.Message}");
+ }
return null;
}
@@ -234,7 +237,10 @@ private static string GetFieldString(IXEvent xe, string fieldName)
if (xe.Fields != null && xe.Fields.TryGetValue(fieldName, out object val) && val != null)
return Convert.ToInt64(val);
}
- catch { }
+ catch (Exception ex)
+ {
+ System.Diagnostics.Debug.WriteLine($"[XelFileReader] GetFieldInt64Nullable('{fieldName}'): {ex.Message}");
+ }
return null;
}
@@ -245,7 +251,10 @@ private static string GetActionString(IXEvent xe, string actionName)
if (xe.Actions != null && xe.Actions.TryGetValue(actionName, out object val) && val != null)
return val.ToString();
}
- catch { }
+ catch (Exception ex)
+ {
+ System.Diagnostics.Debug.WriteLine($"[XelFileReader] GetActionString('{actionName}'): {ex.Message}");
+ }
return null;
}
@@ -256,7 +265,10 @@ private static long GetActionInt64(IXEvent xe, string actionName)
if (xe.Actions != null && xe.Actions.TryGetValue(actionName, out object val) && val != null)
return Convert.ToInt64(val);
}
- catch { }
+ catch (Exception ex)
+ {
+ System.Diagnostics.Debug.WriteLine($"[XelFileReader] GetActionInt64('{actionName}'): {ex.Message}");
+ }
return 0;
}
From 79aa24f302361d58da7995371e313f8e6224c9d7 Mon Sep 17 00:00:00 2001
From: JosephPilov-MSFT <23519517+PiJoCoder@users.noreply.github.com>
Date: Fri, 8 May 2026 12:37:03 -0500
Subject: [PATCH 11/12] #513 #522 feat/fix: local server time import for
TraceEvent and ReadTrace importers
TraceEventImporter - new "Import events using local server time (not UTC)" option:
- Added OPTION_USE_LOCAL_SERVER_TIME option to TraceEventImporter, matching
the equivalent option already present in ReadTraceNexusImporter.
- Added GetLocalServerTimeOffset() which reads the UTC-to-local offset from
tbl_ServerProperties (primary) or tbl_server_times (fallback), mirroring
the behaviour of ReadTrace.exe -B flag used by ReadTraceNexusImporter.
- When the option is enabled, all event StartTime and EndTime values are
shifted by the UTC offset during import so the ReadTrace schema tables
store timestamps in local server time.
- Added WriteLocalTimeFlag() to TraceEventImporterPlugin that persists a
TimestampsAreLocalTime flag to tbl_ServerProperties, tbl_server_times,
and ReadTrace.tblMiscInfo so that SQLNexus_PostProcessing.sql knows to
copy StartTime/EndTime directly into the _local columns instead of
applying a second UTC offset conversion.
ReadTraceNexusImporter - WriteLocalTimeFlag() hardening:
- Added a WriteLocalTimeFlag() with three storage targets:
tbl_ServerProperties, tbl_server_times, and ReadTrace.tblMiscInfo, so the flag is
reliably stored even when the PSSDIAG / Log Scout diagnostic tables
(tbl_ServerProperties, tbl_server_times)
do not yet exist at the time WriteLocalTimeFlag() is called.
SQLNexus_PostProcessing.sql - local time column population fixes:
- Changed the outer IF condition for the StartTime_local / EndTime_local
update block from checking PSSDIAG tables to checking ReadTrace tables
(tblBatches, tblStatements, tblConnections), so the update always runs
when ReadTrace data has been imported regardless of whether PSSDIAG
diagnostic tables are present.
- Added TimestampsAreLocalTime flag detection from tbl_ServerProperties
(primary), tbl_server_times (secondary), and ReadTrace.tblMiscInfo
(tertiary fallback). When the flag is set the _local columns are
populated by direct copy from StartTime/EndTime; otherwise the UTC
offset is applied as before.
- Wrapped both UTCOffset_in_Hours reads with
ISNULL(CONVERT(NUMERIC(3,0), ...), 0) and added a final
SET @utc_to_local_offset = ISNULL(@utc_to_local_offset, 0) guard to
prevent DATEADD returning NULL when PropertyValue is NULL.
fmImport.cs - importer load order fix:
- Assigned TraceEventImporter a deterministic priority of 150 in
OrderedImporterFiles() so it always loads after RowsetImportEngine (100)
and before ReadTrace (200). Previously it received file.GetHashCode() as
its sort key, which could place it before the Rowset Importer, causing
tbl_ServerProperties to not yet exist when GetLocalServerTimeOffset()
and WriteLocalTimeFlag() were called.
---
.../ReadTraceNexusImporter.cs | 75 ++++++++-
.../TraceEventImporterPlugin.cs | 148 ++++++++++++++++++
sqlnexus/SQLNexus_PostProcessing.sql | 101 +++++++++---
sqlnexus/fmImport.cs | 10 +-
4 files changed, 308 insertions(+), 26 deletions(-)
diff --git a/ReadTraceNexusImporter/ReadTraceNexusImporter.cs b/ReadTraceNexusImporter/ReadTraceNexusImporter.cs
index f67dadcf..a9d12883 100644
--- a/ReadTraceNexusImporter/ReadTraceNexusImporter.cs
+++ b/ReadTraceNexusImporter/ReadTraceNexusImporter.cs
@@ -50,6 +50,10 @@ public class ReadTraceNexusImporter : INexusImporter, INexusProgressReporter
"tbl_ServerProperties " +
"WHERE PropertyName = 'UTCOffset_in_Hours'";
+ // Flag written to the DB when timestamps are already in local server time.
+ // SQLNexus_PostProcessing.sql reads this to skip the UTC-to-local offset conversion.
+ internal const string LOCAL_TIME_FLAG_PROPERTY = "TimestampsAreLocalTime";
+
// Private members
private ArrayList knownRowsets = new ArrayList(); // List of the rowsets we know how to interpret
@@ -186,6 +190,67 @@ private long GetApproximateTotalRowsInserted()
}
}
+ ///
+ /// Writes a flag indicating that timestamps in the ReadTrace tables are already in local
+ /// server time. Writes to tbl_ServerProperties when it exists, and also sets a
+ /// TimestampsAreLocalTime column on tbl_server_times (adding the column
+ /// first if necessary) as a fallback for environments where tbl_ServerProperties
+ /// is not present. SQLNexus_PostProcessing.sql reads either location to skip the
+ /// UTC offset conversion when populating StartTime_local / EndTime_local.
+ ///
+ private void WriteLocalTimeFlag()
+ {
+ const string sql =
+ // --- tbl_ServerProperties (primary) ---
+ "IF OBJECT_ID('dbo.tbl_ServerProperties') IS NOT NULL " +
+ "BEGIN " +
+ " IF EXISTS (SELECT 1 FROM dbo.tbl_ServerProperties WHERE PropertyName = '" + LOCAL_TIME_FLAG_PROPERTY + "') " +
+ " UPDATE dbo.tbl_ServerProperties SET PropertyValue = '1' WHERE PropertyName = '" + LOCAL_TIME_FLAG_PROPERTY + "'; " +
+ " ELSE " +
+ " INSERT INTO dbo.tbl_ServerProperties (PropertyName, PropertyValue) VALUES ('" + LOCAL_TIME_FLAG_PROPERTY + "', '1'); " +
+ "END " +
+ // --- tbl_server_times (fallback) ---
+ // NOTE: the ALTER TABLE and the UPDATE must NOT be in the same batch because SQL
+ // Server compiles the whole batch before execution and would fail to resolve the
+ // new column at parse time. sp_executesql defers compilation of the UPDATE to
+ // after the ALTER TABLE has already run and the column is visible in the catalog.
+ "IF OBJECT_ID('dbo.tbl_server_times') IS NOT NULL " +
+ "BEGIN " +
+ " IF COL_LENGTH('dbo.tbl_server_times', '" + LOCAL_TIME_FLAG_PROPERTY + "') IS NULL " +
+ " ALTER TABLE dbo.tbl_server_times ADD [" + LOCAL_TIME_FLAG_PROPERTY + "] BIT NULL; " +
+ " IF COL_LENGTH('dbo.tbl_server_times', '" + LOCAL_TIME_FLAG_PROPERTY + "') IS NOT NULL " +
+ " EXEC sp_executesql N'UPDATE dbo.tbl_server_times SET [" + LOCAL_TIME_FLAG_PROPERTY + "] = 1'; " +
+ "END " +
+ // --- ReadTrace.tblMiscInfo (guaranteed fallback) ---
+ // This table is always created by the managed importer schema, so the flag is
+ // stored reliably even when PSSDIAG / Log Scout diagnostic tables are absent.
+ "IF OBJECT_ID('ReadTrace.tblMiscInfo') IS NOT NULL " +
+ "BEGIN " +
+ " IF EXISTS (SELECT 1 FROM ReadTrace.tblMiscInfo WHERE Attribute = '" + LOCAL_TIME_FLAG_PROPERTY + "') " +
+ " UPDATE ReadTrace.tblMiscInfo SET Value = '1' WHERE Attribute = '" + LOCAL_TIME_FLAG_PROPERTY + "'; " +
+ " ELSE " +
+ " INSERT INTO ReadTrace.tblMiscInfo (Attribute, Value) VALUES ('" + LOCAL_TIME_FLAG_PROPERTY + "', '1'); " +
+ "END";
+
+ using (SqlConnection cn = new SqlConnection(connStr))
+ {
+ try
+ {
+ cn.Open();
+ using (SqlCommand cmd = new SqlCommand(sql, cn))
+ {
+ cmd.CommandTimeout = 0;
+ cmd.ExecuteNonQuery();
+ }
+ Util.Logger.LogMessage("ReadTraceNexusImporter: Wrote '" + LOCAL_TIME_FLAG_PROPERTY + "' flag to tbl_ServerProperties, tbl_server_times, and/or ReadTrace.tblMiscInfo.");
+ }
+ catch (Exception e)
+ {
+ Util.Logger.LogMessage("ReadTraceNexusImporter: Could not write local time flag: " + e.Message);
+ }
+ }
+ }
+
private decimal GetLocalServerTimeOffset()
{
using (SqlConnection cn = new SqlConnection(connStr))
@@ -292,7 +357,7 @@ private string FileFirstXelFile(string[] files)
/// Cancel an in-progress load
/// Called by host to ask in importer abort an in-progress load. Can return before abort is complete;
- /// the host will wait until DoImport() returns.
+ /// the host will wait until DoImport() returns.
public void Cancel()
{
Cancelled = true;
@@ -312,7 +377,7 @@ public void Cancel()
State = ImportState.Idle;
}
- /// True if the import has been asked to cancel an in-progress load. Set by the Cancel method.
+ /// True if the import has been asked to cancel an in-progress load. Set by the Cancel method.
public bool Cancelled
{
get { return canceled; }
@@ -320,7 +385,7 @@ public bool Cancelled
}
/// Start import
- /// Initialize() will be called prior to DoImport()
+ /// Initialize() will be called prior to DoImport()
/// true if import succeeds, false otherwise
public bool DoImport()
{
@@ -451,7 +516,11 @@ public bool DoImport()
State = ImportState.Idle;
if (0 == processReadTrace.ExitCode)
+ {
+ if ((bool)this.options[OPTION_USE_LOCAL_SERVER_TIME])
+ WriteLocalTimeFlag();
return true;
+ }
else
return false;
}
diff --git a/TraceEventImporter/TraceEventImporterPlugin.cs b/TraceEventImporter/TraceEventImporterPlugin.cs
index c747c962..39124a37 100644
--- a/TraceEventImporter/TraceEventImporterPlugin.cs
+++ b/TraceEventImporter/TraceEventImporterPlugin.cs
@@ -28,6 +28,7 @@ public class TraceEventImporterPlugin : INexusImporter, INexusProgressReporter
private const string OPTION_ENABLED = "Enabled";
private const string OPTION_DROP_EXISTING = "Drop existing ReadTrace tables";
private const string OPTION_INTERVAL_SECONDS = "Aggregation interval (seconds)";
+ private const string OPTION_USE_LOCAL_SERVER_TIME = "Import events using local server time (not UTC)";
// Private state
private ILogger _logger;
@@ -51,6 +52,7 @@ public TraceEventImporterPlugin()
_options.Add(OPTION_DROP_EXISTING, true);
_options.Add(OPTION_INTERVAL_SECONDS, 60);
_options.Add(OPTION_ENABLED, true);
+ _options.Add(OPTION_USE_LOCAL_SERVER_TIME, false);
}
#region INexusImporter
@@ -149,6 +151,17 @@ public bool DoImport()
int intervalSeconds = Convert.ToInt32(_options[OPTION_INTERVAL_SECONDS]);
long globalSeq = 0;
+ // When importing with local server time, shift every event timestamp by the
+ // UTC-to-local offset so StartTime/EndTime are stored in local time, matching
+ // the behaviour of ReadTraceNexusImporter's -B flag passed to ReadTrace.exe.
+ TimeSpan localTimeOffset = TimeSpan.Zero;
+ if ((bool)_options[OPTION_USE_LOCAL_SERVER_TIME])
+ {
+ decimal offsetHours = GetLocalServerTimeOffset();
+ localTimeOffset = TimeSpan.FromHours((double)offsetHours);
+ LogMessage($"TraceEventImporter: Local server time offset = {offsetHours} hours; timestamps will be shifted accordingly.");
+ }
+
using (var writer = new BulkWriter(_connStr))
{
foreach (string file in files)
@@ -169,6 +182,14 @@ public bool DoImport()
{
if (_cancelled) break;
+ if (localTimeOffset != TimeSpan.Zero)
+ {
+ if (evt.StartTime.HasValue)
+ evt.StartTime = evt.StartTime.Value.Add(localTimeOffset);
+ if (evt.EndTime.HasValue)
+ evt.EndTime = evt.EndTime.Value.Add(localTimeOffset);
+ }
+
processor.ProcessEvent(evt);
_totalLinesProcessed++;
fileEventsRead++;
@@ -269,6 +290,8 @@ public bool DoImport()
RunPostLoadFixups();
LogMessage($"TraceEventImporter: Import complete. {_totalRowsInserted} total rows inserted.");
+ if ((bool)_options[OPTION_USE_LOCAL_SERVER_TIME])
+ WriteLocalTimeFlag();
State = ImportState.Idle;
return true;
}
@@ -326,6 +349,131 @@ private void RunPostLoadFixups()
ExecuteSqlScript(sql);
}
+ private const string LOCAL_SRV_TIME_QUERY =
+ "SELECT ISNULL(CONVERT(decimal, PropertyValue), 0) UtcToLocalOffset " +
+ "FROM tbl_ServerProperties " +
+ "WHERE PropertyName = 'UTCOffset_in_Hours'";
+
+ ///
+ /// Reads the UTC-to-local offset (in hours) from the database.
+ /// Returns 0 if the value cannot be determined.
+ ///
+ private decimal GetLocalServerTimeOffset()
+ {
+ // Primary: tbl_ServerProperties
+ try
+ {
+ using (var cn = new SqlConnection(_connStr))
+ {
+ cn.Open();
+ using (var cmd = new SqlCommand(LOCAL_SRV_TIME_QUERY, cn))
+ {
+ cmd.CommandTimeout = 0;
+ object result = cmd.ExecuteScalar();
+ if (result != null && result != DBNull.Value)
+ {
+ decimal offset = Convert.ToDecimal(result);
+ LogMessage("TraceEventImporter: UTC_Offset from tbl_ServerProperties: " + offset);
+ return offset;
+ }
+ }
+ }
+ }
+ catch (Exception e)
+ {
+ LogMessage("TraceEventImporter: Could not read UTC offset from tbl_ServerProperties: " + e.Message);
+ }
+
+ // Fallback: tbl_server_times (Log Scout captures without PSSDIAG)
+ try
+ {
+ using (var cn = new SqlConnection(_connStr))
+ {
+ cn.Open();
+ using (var cmd = new SqlCommand(
+ "SELECT TOP 1 ISNULL(time_delta_hours * -1, 0) FROM dbo.tbl_server_times", cn))
+ {
+ cmd.CommandTimeout = 0;
+ object result = cmd.ExecuteScalar();
+ if (result != null && result != DBNull.Value)
+ {
+ decimal offset = Convert.ToDecimal(result);
+ LogMessage("TraceEventImporter: UTC_Offset from tbl_server_times: " + offset);
+ return offset;
+ }
+ }
+ }
+ }
+ catch (Exception e)
+ {
+ LogMessage("TraceEventImporter: Could not read UTC offset from tbl_server_times: " + e.Message);
+ }
+
+ LogMessage("TraceEventImporter: UTC offset not found; defaulting to 0.");
+ return 0;
+ }
+
+ ///
+ /// Writes a flag to tbl_ServerProperties (when it exists) indicating that
+ /// timestamps in the ReadTrace tables are already in local server time.
+ /// SQLNexus_PostProcessing.sql reads this flag to skip the UTC offset conversion
+ /// when populating StartTime_local / EndTime_local.
+ ///
+ private void WriteLocalTimeFlag()
+ {
+ const string flagName = "TimestampsAreLocalTime";
+ const string sql =
+ // --- tbl_ServerProperties (primary) ---
+ "IF OBJECT_ID('dbo.tbl_ServerProperties') IS NOT NULL " +
+ "BEGIN " +
+ " IF EXISTS (SELECT 1 FROM dbo.tbl_ServerProperties WHERE PropertyName = '" + flagName + "') " +
+ " UPDATE dbo.tbl_ServerProperties SET PropertyValue = '1' WHERE PropertyName = '" + flagName + "'; " +
+ " ELSE " +
+ " INSERT INTO dbo.tbl_ServerProperties (PropertyName, PropertyValue) VALUES ('" + flagName + "', '1'); " +
+ "END " +
+ // --- tbl_server_times (fallback) ---
+ // NOTE: the ALTER TABLE and the UPDATE must NOT be in the same batch because SQL
+ // Server compiles the whole batch before execution and would fail to resolve the
+ // new column at parse time. sp_executesql defers compilation of the UPDATE to
+ // after the ALTER TABLE has already run and the column is visible in the catalog.
+ "IF OBJECT_ID('dbo.tbl_server_times') IS NOT NULL " +
+ "BEGIN " +
+ " IF COL_LENGTH('dbo.tbl_server_times', '" + flagName + "') IS NULL " +
+ " ALTER TABLE dbo.tbl_server_times ADD [" + flagName + "] BIT NULL; " +
+ " IF COL_LENGTH('dbo.tbl_server_times', '" + flagName + "') IS NOT NULL " +
+ " EXEC sp_executesql N'UPDATE dbo.tbl_server_times SET [" + flagName + "] = 1'; " +
+ "END " +
+ // --- ReadTrace.tblMiscInfo (guaranteed fallback) ---
+ // This table is always created by the managed importer schema, so the flag is
+ // stored reliably even when PSSDIAG / Log Scout diagnostic tables are absent
+ // (e.g. when only the Trace Event Importer is enabled without the Rowset Importer).
+ "IF OBJECT_ID('ReadTrace.tblMiscInfo') IS NOT NULL " +
+ "BEGIN " +
+ " IF EXISTS (SELECT 1 FROM ReadTrace.tblMiscInfo WHERE Attribute = '" + flagName + "') " +
+ " UPDATE ReadTrace.tblMiscInfo SET Value = '1' WHERE Attribute = '" + flagName + "'; " +
+ " ELSE " +
+ " INSERT INTO ReadTrace.tblMiscInfo (Attribute, Value) VALUES ('" + flagName + "', '1'); " +
+ "END";
+
+ using (var cn = new SqlConnection(_connStr))
+ {
+ try
+ {
+ cn.Open();
+ using (var cmd = new SqlCommand(sql, cn))
+ {
+ cmd.CommandTimeout = 0;
+ cmd.ExecuteNonQuery();
+ }
+ LogMessage("TraceEventImporter: Wrote '" + flagName + "' flag to tbl_ServerProperties, tbl_server_times, and/or ReadTrace.tblMiscInfo.");
+ }
+ catch (Exception e)
+ {
+ LogMessage("TraceEventImporter: Could not write local time flag: " + e.Message);
+ }
+ }
+ }
+
private void ExecuteSqlScript(string script)
{
string[] batches = Regex.Split(script, @"^\s*GO\s*$", RegexOptions.Multiline | RegexOptions.IgnoreCase);
diff --git a/sqlnexus/SQLNexus_PostProcessing.sql b/sqlnexus/SQLNexus_PostProcessing.sql
index c222f64d..7b0167b8 100644
--- a/sqlnexus/SQLNexus_PostProcessing.sql
+++ b/sqlnexus/SQLNexus_PostProcessing.sql
@@ -47,35 +47,83 @@ END;
GO
IF (
- (OBJECT_ID('tbl_ServerProperties') IS NOT NULL)
- OR (OBJECT_ID('tbl_server_times') IS NOT NULL)
+ (OBJECT_ID('[ReadTrace].[tblBatches]') IS NOT NULL)
+ OR (OBJECT_ID('[ReadTrace].[tblStatements]') IS NOT NULL)
+ OR (OBJECT_ID('[ReadTrace].[tblConnections]') IS NOT NULL)
)
BEGIN
- --get the offset from one of two possible tables
- DECLARE @utc_to_local_offset NUMERIC(3, 0) = 0;
+ -- Check whether the importer flagged that timestamps are already in local server time.
+ -- When true, StartTime/EndTime in the ReadTrace tables are already local, so
+ -- StartTime_local/EndTime_local should be a direct copy rather than an offset conversion.
+ DECLARE @timestamps_are_local BIT = 0;
+
+ IF OBJECT_ID('dbo.tbl_ServerProperties') IS NOT NULL
+ BEGIN
+ IF EXISTS (
+ SELECT 1 FROM dbo.tbl_ServerProperties
+ WHERE PropertyName = 'TimestampsAreLocalTime' AND PropertyValue = '1'
+ )
+ SET @timestamps_are_local = 1;
+ END;
+ ELSE IF OBJECT_ID('dbo.tbl_server_times') IS NOT NULL
+ AND COL_LENGTH('dbo.tbl_server_times', 'TimestampsAreLocalTime') IS NOT NULL
+ BEGIN
+ IF EXISTS (
+ SELECT 1 FROM dbo.tbl_server_times
+ WHERE [TimestampsAreLocalTime] = 1
+ )
+ SET @timestamps_are_local = 1;
+ END;
- IF OBJECT_ID('tbl_ServerProperties') IS NOT NULL
+ -- Fallback: ReadTrace.tblMiscInfo is written by both managed importers and is always
+ -- present even when PSSDIAG / Log Scout diagnostic tables (tbl_ServerProperties,
+ -- tbl_server_times) are absent (e.g. when only the Trace Event Importer is enabled).
+ IF @timestamps_are_local = 0 AND OBJECT_ID('ReadTrace.tblMiscInfo') IS NOT NULL
BEGIN
- SELECT @utc_to_local_offset = PropertyValue
- FROM dbo.tbl_ServerProperties
- WHERE PropertyName = 'UTCOffset_in_Hours';
+ IF EXISTS (
+ SELECT 1 FROM ReadTrace.tblMiscInfo
+ WHERE Attribute = 'TimestampsAreLocalTime' AND Value = '1'
+ )
+ SET @timestamps_are_local = 1;
END;
- ELSE IF OBJECT_ID('tbl_server_times') IS NOT NULL
+
+ --get the offset from one of two possible tables (defaults to 0 when neither is present)
+ DECLARE @utc_to_local_offset NUMERIC(3, 0) = 0;
+
+ IF @timestamps_are_local = 0
BEGIN
- SELECT TOP 1
- @utc_to_local_offset = time_delta_hours * -1
- FROM dbo.tbl_server_times;
+ IF OBJECT_ID('tbl_ServerProperties') IS NOT NULL
+ BEGIN
+ SELECT @utc_to_local_offset = ISNULL(CONVERT(NUMERIC(3, 0), PropertyValue), 0)
+ FROM dbo.tbl_ServerProperties
+ WHERE PropertyName = 'UTCOffset_in_Hours';
+ END;
+ ELSE IF OBJECT_ID('tbl_server_times') IS NOT NULL
+ BEGIN
+ SELECT TOP 1
+ @utc_to_local_offset = ISNULL(CONVERT(NUMERIC(3, 0), time_delta_hours * -1), 0)
+ FROM dbo.tbl_server_times;
+ END;
END;
+
+ -- Guard: ensure @utc_to_local_offset is never NULL (DATEADD returns NULL when number arg is NULL)
+ SET @utc_to_local_offset = ISNULL(@utc_to_local_offset, 0);
+
--update the new columns in tblBatches with local times
IF (
(COLUMNPROPERTY(OBJECT_ID('[ReadTrace].[tblBatches]'), 'StartTime_local', 'ColumnId') IS NOT NULL)
AND (COLUMNPROPERTY(OBJECT_ID('[ReadTrace].[tblBatches]'), 'EndTime_local', 'ColumnId') IS NOT NULL)
)
BEGIN
- UPDATE [ReadTrace].[tblBatches]
- SET StartTime_local = DATEADD(HOUR, @utc_to_local_offset, StartTime),
- EndTime_local = DATEADD(HOUR, @utc_to_local_offset, EndTime);
+ IF @timestamps_are_local = 1
+ UPDATE [ReadTrace].[tblBatches]
+ SET StartTime_local = StartTime,
+ EndTime_local = EndTime;
+ ELSE
+ UPDATE [ReadTrace].[tblBatches]
+ SET StartTime_local = DATEADD(HOUR, @utc_to_local_offset, StartTime),
+ EndTime_local = DATEADD(HOUR, @utc_to_local_offset, EndTime);
END;
--update the new columns in tblStatements with local times
@@ -84,9 +132,14 @@ BEGIN
AND (COLUMNPROPERTY(OBJECT_ID('[ReadTrace].[tblStatements]'), 'EndTime_local', 'ColumnId') IS NOT NULL)
)
BEGIN
- UPDATE [ReadTrace].[tblStatements]
- SET StartTime_local = DATEADD(HOUR, @utc_to_local_offset, StartTime),
- EndTime_local = DATEADD(HOUR, @utc_to_local_offset, EndTime);
+ IF @timestamps_are_local = 1
+ UPDATE [ReadTrace].[tblStatements]
+ SET StartTime_local = StartTime,
+ EndTime_local = EndTime;
+ ELSE
+ UPDATE [ReadTrace].[tblStatements]
+ SET StartTime_local = DATEADD(HOUR, @utc_to_local_offset, StartTime),
+ EndTime_local = DATEADD(HOUR, @utc_to_local_offset, EndTime);
END;
@@ -96,10 +149,14 @@ BEGIN
AND (COLUMNPROPERTY(OBJECT_ID('[ReadTrace].[tblConnections]'), 'EndTime_local', 'ColumnId') IS NOT NULL)
)
BEGIN
-
- UPDATE [ReadTrace].[tblConnections]
- SET StartTime_local = DATEADD(HOUR, @utc_to_local_offset, StartTime),
- EndTime_local = DATEADD(HOUR, @utc_to_local_offset, EndTime);
+ IF @timestamps_are_local = 1
+ UPDATE [ReadTrace].[tblConnections]
+ SET StartTime_local = StartTime,
+ EndTime_local = EndTime;
+ ELSE
+ UPDATE [ReadTrace].[tblConnections]
+ SET StartTime_local = DATEADD(HOUR, @utc_to_local_offset, StartTime),
+ EndTime_local = DATEADD(HOUR, @utc_to_local_offset, EndTime);
END;
END;
diff --git a/sqlnexus/fmImport.cs b/sqlnexus/fmImport.cs
index cac165a9..ad1d582b 100644
--- a/sqlnexus/fmImport.cs
+++ b/sqlnexus/fmImport.cs
@@ -506,7 +506,15 @@ private List<string> OrderedImporterFiles(string[] files)
else if (file.ToUpper().Contains("READTRACE"))
{
ImporterList.Add(200, file);
-
+ }
+ else if (file.ToUpper().Contains("TRACEEVENTIMPORTER"))
+ {
+ // TraceEventImporter must run after RowsetImportEngine (100) so that
+ // tbl_ServerProperties already exists when GetLocalServerTimeOffset()
+ // and WriteLocalTimeFlag() are called. Place it between Rowset (100)
+ // and ReadTrace (200); ReadTrace and TraceEventImporter are mutually
+ // exclusive so they will never both be present at the same time.
+ ImporterList.Add(150, file);
}
else
{
From ed18339bb75c971f05d0526029d1bd3fa5ef9a9f Mon Sep 17 00:00:00 2001
From: JosephPilov-MSFT <23519517+PiJoCoder@users.noreply.github.com>
Date: Fri, 8 May 2026 13:59:23 -0500
Subject: [PATCH 12/12] fix(#513, #522): Always write local-time flag; rename
flag to ImportedTraceTimestampsInLocalTime
Changes
-------
ReadTraceNexusImporter.cs / TraceEventImporterPlugin.cs
- WriteLocalTimeFlag() now accepts a bool parameter and writes '1' (local time) or '0' (UTC) rather than only writing when the option is ON.
Previously the flag was absent when the option was OFF, leaving SQLNexus_PostProcessing.sql with no signal and therefore unable to
populate StartTime_local / EndTime_local reliably.
- WriteLocalTimeFlag() is now called unconditionally at the end of every successful import instead of being guarded by the option value.
- SQL values are written via SqlParameter (@flagStr VARCHAR, @flagBit BIT) instead of inline string concatenation, making the intent
  explicit and removing the injection-prone string-building pattern.
- Flag property name renamed from TimestampsAreLocalTime to ImportedTraceTimestampsInLocalTime across tbl_ServerProperties,
tbl_server_times, and ReadTrace.tblMiscInfo. The new name makes clear that the value was recorded during import and describes trace-event
timestamps specifically.
SQLNexus_PostProcessing.sql
- All four references to TimestampsAreLocalTime updated to ImportedTraceTimestampsInLocalTime (tbl_ServerProperties WHERE,
tbl_server_times COL_LENGTH + WHERE, tblMiscInfo WHERE). Existing logic (check for Value = '1' to set @timestamps_are_local)
is unchanged; a stored '0' simply leaves the variable at its default of 0 and the DATEADD offset path runs as before.
---
.../ReadTraceNexusImporter.cs | 63 ++++++++++---------
.../TraceEventImporterPlugin.cs | 60 ++++++++++--------
sqlnexus/SQLNexus_PostProcessing.sql | 8 +--
3 files changed, 72 insertions(+), 59 deletions(-)
diff --git a/ReadTraceNexusImporter/ReadTraceNexusImporter.cs b/ReadTraceNexusImporter/ReadTraceNexusImporter.cs
index a9d12883..8ddf457d 100644
--- a/ReadTraceNexusImporter/ReadTraceNexusImporter.cs
+++ b/ReadTraceNexusImporter/ReadTraceNexusImporter.cs
@@ -50,9 +50,8 @@ public class ReadTraceNexusImporter : INexusImporter, INexusProgressReporter
"tbl_ServerProperties " +
"WHERE PropertyName = 'UTCOffset_in_Hours'";
- // Flag written to the DB when timestamps are already in local server time.
- // SQLNexus_PostProcessing.sql reads this to skip the UTC-to-local offset conversion.
- internal const string LOCAL_TIME_FLAG_PROPERTY = "TimestampsAreLocalTime";
+ // Flag name written to the DB to record whether timestamps are in local time or UTC.
+ // SQLNexus_PostProcessing.sql reads this value to choose the correct time conversion.
// Private members
@@ -191,45 +190,48 @@ private long GetApproximateTotalRowsInserted()
}
/// <summary>
- /// Writes a flag indicating that timestamps in the ReadTrace tables are already in local
- /// server time. Writes to tbl_ServerProperties when it exists, and also sets a
- /// TimestampsAreLocalTime column on tbl_server_times (adding the column
- /// first if necessary) as a fallback for environments where tbl_ServerProperties
- /// is not present. SQLNexus_PostProcessing.sql reads either location to skip the
- /// UTC offset conversion when populating StartTime_local / EndTime_local.
+ /// Writes a flag recording whether timestamps in the ReadTrace tables are already in local
+ /// server time (<paramref name="isLocalTime"/> = true, value '1') or in UTC (false, '0').
+ /// Writes to tbl_ServerProperties when it exists, and also sets a
+ /// ImportedTraceTimestampsInLocalTime column on tbl_server_times (adding the column
+ /// first if necessary) as a fallback for environments where tbl_ServerProperties
+ /// is not present. SQLNexus_PostProcessing.sql reads either location to choose
+ /// the correct UTC offset conversion when populating StartTime_local /
+ /// EndTime_local.
/// </summary>
- private void WriteLocalTimeFlag()
+ private void WriteLocalTimeFlag(bool isLocalTime)
{
+ // @flagStr = '1' or '0' for VARCHAR columns (tbl_ServerProperties, tblMiscInfo).
+ // @flagBit = 1 or 0 for the BIT column on tbl_server_times.
+ // sp_executesql's own @val parameter is fed from the outer @flagBit so the BIT
+ // update is also fully parameterised with no runtime concatenation.
const string sql =
// --- tbl_ServerProperties (primary) ---
"IF OBJECT_ID('dbo.tbl_ServerProperties') IS NOT NULL " +
"BEGIN " +
- " IF EXISTS (SELECT 1 FROM dbo.tbl_ServerProperties WHERE PropertyName = '" + LOCAL_TIME_FLAG_PROPERTY + "') " +
- " UPDATE dbo.tbl_ServerProperties SET PropertyValue = '1' WHERE PropertyName = '" + LOCAL_TIME_FLAG_PROPERTY + "'; " +
+ " IF EXISTS (SELECT 1 FROM dbo.tbl_ServerProperties WHERE PropertyName = 'ImportedTraceTimestampsInLocalTime') " +
+ " UPDATE dbo.tbl_ServerProperties SET PropertyValue = @flagStr WHERE PropertyName = 'ImportedTraceTimestampsInLocalTime'; " +
" ELSE " +
- " INSERT INTO dbo.tbl_ServerProperties (PropertyName, PropertyValue) VALUES ('" + LOCAL_TIME_FLAG_PROPERTY + "', '1'); " +
+ " INSERT INTO dbo.tbl_ServerProperties (PropertyName, PropertyValue) VALUES ('ImportedTraceTimestampsInLocalTime', @flagStr); " +
"END " +
// --- tbl_server_times (fallback) ---
- // NOTE: the ALTER TABLE and the UPDATE must NOT be in the same batch because SQL
- // Server compiles the whole batch before execution and would fail to resolve the
- // new column at parse time. sp_executesql defers compilation of the UPDATE to
- // after the ALTER TABLE has already run and the column is visible in the catalog.
+ // NOTE: ALTER TABLE and UPDATE must be in different batches; sp_executesql is
+ // used here so the UPDATE is compiled only after the column is already visible.
+ // The outer @flagBit parameter is forwarded into sp_executesql's own @val.
"IF OBJECT_ID('dbo.tbl_server_times') IS NOT NULL " +
"BEGIN " +
- " IF COL_LENGTH('dbo.tbl_server_times', '" + LOCAL_TIME_FLAG_PROPERTY + "') IS NULL " +
- " ALTER TABLE dbo.tbl_server_times ADD [" + LOCAL_TIME_FLAG_PROPERTY + "] BIT NULL; " +
- " IF COL_LENGTH('dbo.tbl_server_times', '" + LOCAL_TIME_FLAG_PROPERTY + "') IS NOT NULL " +
- " EXEC sp_executesql N'UPDATE dbo.tbl_server_times SET [" + LOCAL_TIME_FLAG_PROPERTY + "] = 1'; " +
+ " IF COL_LENGTH('dbo.tbl_server_times', 'ImportedTraceTimestampsInLocalTime') IS NULL " +
+ " ALTER TABLE dbo.tbl_server_times ADD [ImportedTraceTimestampsInLocalTime] BIT NULL; " +
+ " IF COL_LENGTH('dbo.tbl_server_times', 'ImportedTraceTimestampsInLocalTime') IS NOT NULL " +
+ " EXEC sp_executesql N'UPDATE dbo.tbl_server_times SET [ImportedTraceTimestampsInLocalTime] = @val', N'@val BIT', @val = @flagBit; " +
"END " +
// --- ReadTrace.tblMiscInfo (guaranteed fallback) ---
- // This table is always created by the managed importer schema, so the flag is
- // stored reliably even when PSSDIAG / Log Scout diagnostic tables are absent.
"IF OBJECT_ID('ReadTrace.tblMiscInfo') IS NOT NULL " +
"BEGIN " +
- " IF EXISTS (SELECT 1 FROM ReadTrace.tblMiscInfo WHERE Attribute = '" + LOCAL_TIME_FLAG_PROPERTY + "') " +
- " UPDATE ReadTrace.tblMiscInfo SET Value = '1' WHERE Attribute = '" + LOCAL_TIME_FLAG_PROPERTY + "'; " +
+ " IF EXISTS (SELECT 1 FROM ReadTrace.tblMiscInfo WHERE Attribute = 'ImportedTraceTimestampsInLocalTime') " +
+ " UPDATE ReadTrace.tblMiscInfo SET Value = @flagStr WHERE Attribute = 'ImportedTraceTimestampsInLocalTime'; " +
" ELSE " +
- " INSERT INTO ReadTrace.tblMiscInfo (Attribute, Value) VALUES ('" + LOCAL_TIME_FLAG_PROPERTY + "', '1'); " +
+ " INSERT INTO ReadTrace.tblMiscInfo (Attribute, Value) VALUES ('ImportedTraceTimestampsInLocalTime', @flagStr); " +
"END";
using (SqlConnection cn = new SqlConnection(connStr))
@@ -240,9 +242,11 @@ private void WriteLocalTimeFlag()
using (SqlCommand cmd = new SqlCommand(sql, cn))
{
cmd.CommandTimeout = 0;
+ cmd.Parameters.Add("@flagStr", SqlDbType.VarChar, 1).Value = isLocalTime ? "1" : "0";
+ cmd.Parameters.Add("@flagBit", SqlDbType.Bit).Value = isLocalTime;
cmd.ExecuteNonQuery();
}
- Util.Logger.LogMessage("ReadTraceNexusImporter: Wrote '" + LOCAL_TIME_FLAG_PROPERTY + "' flag to tbl_ServerProperties, tbl_server_times, and/or ReadTrace.tblMiscInfo.");
+ Util.Logger.LogMessage("ReadTraceNexusImporter: Wrote 'ImportedTraceTimestampsInLocalTime'=" + (isLocalTime ? "1" : "0") + " to tbl_ServerProperties, tbl_server_times, and/or ReadTrace.tblMiscInfo.");
}
catch (Exception e)
{
@@ -517,8 +521,9 @@ public bool DoImport()
if (0 == processReadTrace.ExitCode)
{
- if ((bool)this.options[OPTION_USE_LOCAL_SERVER_TIME])
- WriteLocalTimeFlag();
+ // Always write the flag ('1' = already local time, '0' = UTC) so that
+ // SQLNexus_PostProcessing.sql always has an explicit value to act on.
+ WriteLocalTimeFlag((bool)this.options[OPTION_USE_LOCAL_SERVER_TIME]);
return true;
}
else
diff --git a/TraceEventImporter/TraceEventImporterPlugin.cs b/TraceEventImporter/TraceEventImporterPlugin.cs
index 39124a37..c605eaad 100644
--- a/TraceEventImporter/TraceEventImporterPlugin.cs
+++ b/TraceEventImporter/TraceEventImporterPlugin.cs
@@ -290,8 +290,12 @@ public bool DoImport()
RunPostLoadFixups();
LogMessage($"TraceEventImporter: Import complete. {_totalRowsInserted} total rows inserted.");
- if ((bool)_options[OPTION_USE_LOCAL_SERVER_TIME])
- WriteLocalTimeFlag();
+
+ // Always write the flag ('1' = timestamps already in local time, '0' = timestamps
+ // are UTC). SQLNexus_PostProcessing.sql reads this value to choose between a direct
+ // copy and a DATEADD offset conversion when populating StartTime_local/EndTime_local.
+ WriteLocalTimeFlag((bool)_options[OPTION_USE_LOCAL_SERVER_TIME]);
+
State = ImportState.Idle;
return true;
}
@@ -414,45 +418,47 @@ private decimal GetLocalServerTimeOffset()
}
/// <summary>
- /// Writes a flag to tbl_ServerProperties (when it exists) indicating that
- /// timestamps in the ReadTrace tables are already in local server time.
- /// SQLNexus_PostProcessing.sql reads this flag to skip the UTC offset conversion
- /// when populating StartTime_local / EndTime_local.
+ /// Writes a flag to ReadTrace.tblMiscInfo (and to tbl_ServerProperties /
+ /// tbl_server_times when present) that records whether timestamps in the ReadTrace
+ /// tables are already in local server time (<paramref name="isLocalTime"/> = true, value
+ /// '1') or in UTC (false, value '0').
+ /// SQLNexus_PostProcessing.sql reads this flag to choose between a direct copy
+ /// and a DATEADD offset conversion when populating
+ /// StartTime_local / EndTime_local.
/// </summary>
- private void WriteLocalTimeFlag()
+ private void WriteLocalTimeFlag(bool isLocalTime)
{
- const string flagName = "TimestampsAreLocalTime";
+ // @flagStr = '1' or '0' for VARCHAR columns (tbl_ServerProperties, tblMiscInfo).
+ // @flagBit = 1 or 0 for the BIT column on tbl_server_times.
+ // sp_executesql's own @val parameter is fed from the outer @flagBit so the BIT
+ // update is also fully parameterised with no runtime concatenation.
const string sql =
// --- tbl_ServerProperties (primary) ---
"IF OBJECT_ID('dbo.tbl_ServerProperties') IS NOT NULL " +
"BEGIN " +
- " IF EXISTS (SELECT 1 FROM dbo.tbl_ServerProperties WHERE PropertyName = '" + flagName + "') " +
- " UPDATE dbo.tbl_ServerProperties SET PropertyValue = '1' WHERE PropertyName = '" + flagName + "'; " +
+ " IF EXISTS (SELECT 1 FROM dbo.tbl_ServerProperties WHERE PropertyName = 'ImportedTraceTimestampsInLocalTime') " +
+ " UPDATE dbo.tbl_ServerProperties SET PropertyValue = @flagStr WHERE PropertyName = 'ImportedTraceTimestampsInLocalTime'; " +
" ELSE " +
- " INSERT INTO dbo.tbl_ServerProperties (PropertyName, PropertyValue) VALUES ('" + flagName + "', '1'); " +
+ " INSERT INTO dbo.tbl_ServerProperties (PropertyName, PropertyValue) VALUES ('ImportedTraceTimestampsInLocalTime', @flagStr); " +
"END " +
// --- tbl_server_times (fallback) ---
- // NOTE: the ALTER TABLE and the UPDATE must NOT be in the same batch because SQL
- // Server compiles the whole batch before execution and would fail to resolve the
- // new column at parse time. sp_executesql defers compilation of the UPDATE to
- // after the ALTER TABLE has already run and the column is visible in the catalog.
+ // NOTE: ALTER TABLE and UPDATE must be in different batches; sp_executesql is
+ // used here so the UPDATE is compiled only after the column is already visible.
+ // The outer @flagBit parameter is forwarded into sp_executesql's own @val.
"IF OBJECT_ID('dbo.tbl_server_times') IS NOT NULL " +
"BEGIN " +
- " IF COL_LENGTH('dbo.tbl_server_times', '" + flagName + "') IS NULL " +
- " ALTER TABLE dbo.tbl_server_times ADD [" + flagName + "] BIT NULL; " +
- " IF COL_LENGTH('dbo.tbl_server_times', '" + flagName + "') IS NOT NULL " +
- " EXEC sp_executesql N'UPDATE dbo.tbl_server_times SET [" + flagName + "] = 1'; " +
+ " IF COL_LENGTH('dbo.tbl_server_times', 'ImportedTraceTimestampsInLocalTime') IS NULL " +
+ " ALTER TABLE dbo.tbl_server_times ADD [ImportedTraceTimestampsInLocalTime] BIT NULL; " +
+ " IF COL_LENGTH('dbo.tbl_server_times', 'ImportedTraceTimestampsInLocalTime') IS NOT NULL " +
+ " EXEC sp_executesql N'UPDATE dbo.tbl_server_times SET [ImportedTraceTimestampsInLocalTime] = @val', N'@val BIT', @val = @flagBit; " +
"END " +
// --- ReadTrace.tblMiscInfo (guaranteed fallback) ---
- // This table is always created by the managed importer schema, so the flag is
- // stored reliably even when PSSDIAG / Log Scout diagnostic tables are absent
- // (e.g. when only the Trace Event Importer is enabled without the Rowset Importer).
"IF OBJECT_ID('ReadTrace.tblMiscInfo') IS NOT NULL " +
"BEGIN " +
- " IF EXISTS (SELECT 1 FROM ReadTrace.tblMiscInfo WHERE Attribute = '" + flagName + "') " +
- " UPDATE ReadTrace.tblMiscInfo SET Value = '1' WHERE Attribute = '" + flagName + "'; " +
+ " IF EXISTS (SELECT 1 FROM ReadTrace.tblMiscInfo WHERE Attribute = 'ImportedTraceTimestampsInLocalTime') " +
+ " UPDATE ReadTrace.tblMiscInfo SET Value = @flagStr WHERE Attribute = 'ImportedTraceTimestampsInLocalTime'; " +
" ELSE " +
- " INSERT INTO ReadTrace.tblMiscInfo (Attribute, Value) VALUES ('" + flagName + "', '1'); " +
+ " INSERT INTO ReadTrace.tblMiscInfo (Attribute, Value) VALUES ('ImportedTraceTimestampsInLocalTime', @flagStr); " +
"END";
using (var cn = new SqlConnection(_connStr))
@@ -463,9 +469,11 @@ private void WriteLocalTimeFlag()
using (var cmd = new SqlCommand(sql, cn))
{
cmd.CommandTimeout = 0;
+ cmd.Parameters.AddWithValue("@flagStr", isLocalTime ? "1" : "0");
+ cmd.Parameters.AddWithValue("@flagBit", isLocalTime);
cmd.ExecuteNonQuery();
}
- LogMessage("TraceEventImporter: Wrote '" + flagName + "' flag to tbl_ServerProperties, tbl_server_times, and/or ReadTrace.tblMiscInfo.");
+ LogMessage("TraceEventImporter: Wrote 'ImportedTraceTimestampsInLocalTime'=" + (isLocalTime ? "1" : "0") + " to tbl_ServerProperties, tbl_server_times, and/or ReadTrace.tblMiscInfo.");
}
catch (Exception e)
{
diff --git a/sqlnexus/SQLNexus_PostProcessing.sql b/sqlnexus/SQLNexus_PostProcessing.sql
index 7b0167b8..8b6fc3c1 100644
--- a/sqlnexus/SQLNexus_PostProcessing.sql
+++ b/sqlnexus/SQLNexus_PostProcessing.sql
@@ -62,16 +62,16 @@ BEGIN
BEGIN
IF EXISTS (
SELECT 1 FROM dbo.tbl_ServerProperties
- WHERE PropertyName = 'TimestampsAreLocalTime' AND PropertyValue = '1'
+ WHERE PropertyName = 'ImportedTraceTimestampsInLocalTime' AND PropertyValue = '1'
)
SET @timestamps_are_local = 1;
END;
ELSE IF OBJECT_ID('dbo.tbl_server_times') IS NOT NULL
- AND COL_LENGTH('dbo.tbl_server_times', 'TimestampsAreLocalTime') IS NOT NULL
+ AND COL_LENGTH('dbo.tbl_server_times', 'ImportedTraceTimestampsInLocalTime') IS NOT NULL
BEGIN
IF EXISTS (
SELECT 1 FROM dbo.tbl_server_times
- WHERE [TimestampsAreLocalTime] = 1
+ WHERE [ImportedTraceTimestampsInLocalTime] = 1
)
SET @timestamps_are_local = 1;
END;
@@ -83,7 +83,7 @@ BEGIN
BEGIN
IF EXISTS (
SELECT 1 FROM ReadTrace.tblMiscInfo
- WHERE Attribute = 'TimestampsAreLocalTime' AND Value = '1'
+ WHERE Attribute = 'ImportedTraceTimestampsInLocalTime' AND Value = '1'
)
SET @timestamps_are_local = 1;
END;