diff --git a/proto/gen/rill/runtime/v1/connectors.pb.go b/proto/gen/rill/runtime/v1/connectors.pb.go index c9da2510be0..68e818179db 100644 --- a/proto/gen/rill/runtime/v1/connectors.pb.go +++ b/proto/gen/rill/runtime/v1/connectors.pb.go @@ -1496,6 +1496,163 @@ func (x *BigQueryListTablesResponse) GetNames() []string { return nil } +type ScanConnectorsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` +} + +func (x *ScanConnectorsRequest) Reset() { + *x = ScanConnectorsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_rill_runtime_v1_connectors_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScanConnectorsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScanConnectorsRequest) ProtoMessage() {} + +func (x *ScanConnectorsRequest) ProtoReflect() protoreflect.Message { + mi := &file_rill_runtime_v1_connectors_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScanConnectorsRequest.ProtoReflect.Descriptor instead. +func (*ScanConnectorsRequest) Descriptor() ([]byte, []int) { + return file_rill_runtime_v1_connectors_proto_rawDescGZIP(), []int{23} +} + +func (x *ScanConnectorsRequest) GetInstanceId() string { + if x != nil { + return x.InstanceId + } + return "" +} + +type ScanConnectorsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Connectors []*ScannedConnector `protobuf:"bytes,1,rep,name=connectors,proto3" json:"connectors,omitempty"` +} + +func (x *ScanConnectorsResponse) Reset() { + *x = ScanConnectorsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_rill_runtime_v1_connectors_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScanConnectorsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScanConnectorsResponse) ProtoMessage() {} + +func (x *ScanConnectorsResponse) ProtoReflect() protoreflect.Message { + mi := &file_rill_runtime_v1_connectors_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScanConnectorsResponse.ProtoReflect.Descriptor instead. 
+func (*ScanConnectorsResponse) Descriptor() ([]byte, []int) { + return file_rill_runtime_v1_connectors_proto_rawDescGZIP(), []int{24} +} + +func (x *ScanConnectorsResponse) GetConnectors() []*ScannedConnector { + if x != nil { + return x.Connectors + } + return nil +} + +type ScannedConnector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + HasAnonymousAccess bool `protobuf:"varint,3,opt,name=has_anonymous_access,json=hasAnonymousAccess,proto3" json:"has_anonymous_access,omitempty"` // reports whether access is present without any credentials +} + +func (x *ScannedConnector) Reset() { + *x = ScannedConnector{} + if protoimpl.UnsafeEnabled { + mi := &file_rill_runtime_v1_connectors_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScannedConnector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScannedConnector) ProtoMessage() {} + +func (x *ScannedConnector) ProtoReflect() protoreflect.Message { + mi := &file_rill_runtime_v1_connectors_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScannedConnector.ProtoReflect.Descriptor instead. +func (*ScannedConnector) Descriptor() ([]byte, []int) { + return file_rill_runtime_v1_connectors_proto_rawDescGZIP(), []int{25} +} + +func (x *ScannedConnector) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScannedConnector) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *ScannedConnector) GetHasAnonymousAccess() bool { + if x != nil { + return x.HasAnonymousAccess + } + return false +} + var File_rill_runtime_v1_connectors_proto protoreflect.FileDescriptor var file_rill_runtime_v1_connectors_proto_rawDesc = []byte{ @@ -1690,108 +1847,132 @@ var file_rill_runtime_v1_connectors_proto_rawDesc = []byte{ 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x32, 0x88, 0x0b, 0x0a, 0x10, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x76, 0x0a, 0x0d, 0x53, 0x33, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x12, 0x25, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x33, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x33, 0x4c, 0x69, 0x73, 0x74, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x33, 0x2f, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x86, 0x01, 0x0a, 0x0d, 0x53, 0x33, 0x4c, 0x69, - 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 
0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x72, 0x69, 0x6c, 0x6c, - 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x33, 0x4c, 0x69, - 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x26, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x33, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, - 0x12, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x33, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2f, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x12, 0x99, 0x01, 0x0a, 0x13, 0x53, 0x33, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2b, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x33, 0x47, 0x65, 0x74, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x33, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x31, - 0x2f, 0x73, 0x33, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x94, 0x01, 0x0a, - 0x14, 0x53, 0x33, 0x47, 0x65, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2c, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x33, 0x47, 0x65, 0x74, 0x43, 0x72, 0x65, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, - 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x33, 0x47, 0x65, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x2f, 0x76, 0x31, 0x2f, - 0x73, 0x33, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x5f, 0x69, - 0x6e, 0x66, 0x6f, 0x12, 0x7a, 0x0a, 0x0e, 0x47, 0x43, 0x53, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x43, 0x53, 0x4c, 0x69, 0x73, 0x74, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, + 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x38, 0x0a, 0x15, 0x53, 0x63, + 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x49, 0x64, 0x22, 0x5b, 0x0a, 0x16, 0x53, 0x63, 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, + 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x73, 0x22, 0x6c, 0x0a, 0x10, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, + 0x14, 0x68, 0x61, 0x73, 0x5f, 0x61, 0x6e, 0x6f, 0x6e, 0x79, 0x6d, 0x6f, 0x75, 0x73, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x68, 0x61, 0x73, + 0x41, 0x6e, 0x6f, 0x6e, 0x79, 0x6d, 0x6f, 0x75, 0x73, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x32, + 0x88, 0x0c, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x7e, 0x0a, 0x0e, 0x53, 0x63, 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x61, 0x6e, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, + 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x63, 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, + 0x13, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x2f, + 0x73, 0x63, 0x61, 0x6e, 0x12, 0x76, 0x0a, 0x0d, 0x53, 0x33, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x33, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x72, + 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x33, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x76, + 0x31, 0x2f, 0x73, 0x33, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x86, 0x01, 0x0a, + 0x0d, 0x53, 0x33, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, + 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x33, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x33, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x33, 0x2f, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 
0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x99, 0x01, 0x0a, 0x13, 0x53, 0x33, 0x47, 0x65, 0x74, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2b, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x47, 0x43, 0x53, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x12, 0x0f, - 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x63, 0x73, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, - 0x8a, 0x01, 0x0a, 0x0e, 0x47, 0x43, 0x53, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x12, 0x26, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x43, 0x53, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x72, 0x69, 0x6c, + 0x53, 0x33, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x72, 0x69, 0x6c, + 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x33, 0x47, + 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, + 0x12, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x33, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2f, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x94, 0x01, 0x0a, 0x14, 0x53, 0x33, 0x47, 0x65, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2c, 0x2e, 0x72, 0x69, 0x6c, + 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x33, 0x47, + 0x65, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x33, 0x47, 0x65, 0x74, + 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, + 0x17, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x33, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x7a, 0x0a, 0x0e, 0x47, 0x43, 0x53, 0x4c, + 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x43, 0x53, - 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x31, - 0x2f, 0x67, 0x63, 0x73, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2f, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x98, 0x01, 0x0a, - 0x15, 0x47, 0x43, 0x53, 0x47, 0x65, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x43, 0x53, 0x47, 0x65, 0x74, 0x43, - 0x72, 0x65, 
0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x43, 0x53, 0x47, 0x65, 0x74, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x12, 0x18, 0x2f, - 0x76, 0x31, 0x2f, 0x67, 0x63, 0x73, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x7a, 0x0a, 0x0e, 0x4f, 0x4c, 0x41, 0x50, 0x4c, - 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x26, 0x2e, 0x72, 0x69, 0x6c, 0x6c, - 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x4c, 0x41, 0x50, - 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x27, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x4c, 0x41, 0x50, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x11, 0x12, 0x0f, 0x2f, 0x76, 0x31, 0x2f, 0x6f, 0x6c, 0x61, 0x70, 0x2f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x12, 0x92, 0x01, 0x0a, 0x14, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x12, 0x2c, 0x2e, 0x72, - 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, - 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, - 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x72, 0x69, 0x6c, - 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67, + 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x43, 0x53, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x11, 0x12, 0x0f, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x63, 0x73, 0x2f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x12, 0x8a, 0x01, 0x0a, 0x0e, 0x47, 0x43, 0x53, 0x4c, 0x69, 0x73, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x26, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x43, 0x53, 0x4c, 0x69, 0x73, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x27, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x43, 0x53, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, + 0x12, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x63, 0x73, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x12, 0x98, 0x01, 0x0a, 0x15, 0x47, 0x43, 0x53, 0x47, 0x65, 0x74, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x2e, 0x72, 0x69, + 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 
0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x43, + 0x53, 0x47, 0x65, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x72, 0x69, 0x6c, + 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x43, 0x53, + 0x47, 0x65, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x1a, 0x12, 0x18, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x63, 0x73, 0x2f, 0x63, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x7a, 0x0a, 0x0e, + 0x4f, 0x4c, 0x41, 0x50, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x26, + 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x4f, 0x4c, 0x41, 0x50, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x4c, 0x41, 0x50, 0x4c, 0x69, 0x73, + 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x12, 0x0f, 0x2f, 0x76, 0x31, 0x2f, 0x6f, 0x6c, 0x61, + 0x70, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x92, 0x01, 0x0a, 0x14, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x17, 0x12, 0x15, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2f, - 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x12, 0x8a, 0x01, 0x0a, 0x12, 0x42, 0x69, 0x67, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, - 0x2a, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x72, 0x69, - 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, - 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, - 0x12, 0x13, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x42, 0xb8, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x72, 0x69, - 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0f, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x69, 0x6c, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x72, 0x69, 0x6c, 0x6c, 0x2f, 0x72, 0x69, 0x6c, 0x6c, 0x2f, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x72, 0x75, 0x6e, 0x74, 0x69, - 0x6d, 0x65, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x52, 0x52, 0x58, 0xaa, 0x02, 0x0f, 0x52, 0x69, 0x6c, - 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0f, 0x52, - 0x69, 0x6c, 0x6c, 0x5c, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 
0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, - 0x1b, 0x52, 0x69, 0x6c, 0x6c, 0x5c, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5c, 0x56, 0x31, - 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x11, 0x52, - 0x69, 0x6c, 0x6c, 0x3a, 0x3a, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x3a, 0x3a, 0x56, 0x31, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x12, 0x2c, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x69, 0x73, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2d, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x69, 0x67, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x12, 0x8a, 0x01, + 0x0a, 0x12, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x12, 0x2a, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, + 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2b, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x69, 0x67, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x42, 0xb8, 0x01, 0x0a, 0x13, 0x63, + 0x6f, 0x6d, 0x2e, 0x72, 0x69, 0x6c, 0x6c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x76, 0x31, 0x42, 0x0f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x72, 0x69, 0x6c, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x72, 0x69, 0x6c, 0x6c, 0x2f, + 0x72, 0x69, 0x6c, 0x6c, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x76, 0x31, 0x3b, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x52, 0x52, 0x58, 0xaa, + 0x02, 0x0f, 0x52, 0x69, 0x6c, 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x56, + 0x31, 0xca, 0x02, 0x0f, 0x52, 0x69, 0x6c, 0x6c, 0x5c, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1b, 0x52, 0x69, 0x6c, 0x6c, 0x5c, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0xea, 0x02, 0x11, 0x52, 0x69, 0x6c, 0x6c, 0x3a, 0x3a, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1806,7 +1987,7 @@ func file_rill_runtime_v1_connectors_proto_rawDescGZIP() []byte { return file_rill_runtime_v1_connectors_proto_rawDescData } -var file_rill_runtime_v1_connectors_proto_msgTypes = make([]protoimpl.MessageInfo, 23) +var file_rill_runtime_v1_connectors_proto_msgTypes = make([]protoimpl.MessageInfo, 26) var 
file_rill_runtime_v1_connectors_proto_goTypes = []interface{}{ (*S3Object)(nil), // 0: rill.runtime.v1.S3Object (*S3ListBucketsRequest)(nil), // 1: rill.runtime.v1.S3ListBucketsRequest @@ -1831,39 +2012,45 @@ var file_rill_runtime_v1_connectors_proto_goTypes = []interface{}{ (*BigQueryListDatasetsResponse)(nil), // 20: rill.runtime.v1.BigQueryListDatasetsResponse (*BigQueryListTablesRequest)(nil), // 21: rill.runtime.v1.BigQueryListTablesRequest (*BigQueryListTablesResponse)(nil), // 22: rill.runtime.v1.BigQueryListTablesResponse - (*timestamppb.Timestamp)(nil), // 23: google.protobuf.Timestamp + (*ScanConnectorsRequest)(nil), // 23: rill.runtime.v1.ScanConnectorsRequest + (*ScanConnectorsResponse)(nil), // 24: rill.runtime.v1.ScanConnectorsResponse + (*ScannedConnector)(nil), // 25: rill.runtime.v1.ScannedConnector + (*timestamppb.Timestamp)(nil), // 26: google.protobuf.Timestamp } var file_rill_runtime_v1_connectors_proto_depIdxs = []int32{ - 23, // 0: rill.runtime.v1.S3Object.modified_on:type_name -> google.protobuf.Timestamp + 26, // 0: rill.runtime.v1.S3Object.modified_on:type_name -> google.protobuf.Timestamp 0, // 1: rill.runtime.v1.S3ListObjectsResponse.objects:type_name -> rill.runtime.v1.S3Object - 23, // 2: rill.runtime.v1.GCSObject.modified_on:type_name -> google.protobuf.Timestamp + 26, // 2: rill.runtime.v1.GCSObject.modified_on:type_name -> google.protobuf.Timestamp 9, // 3: rill.runtime.v1.GCSListObjectsResponse.objects:type_name -> rill.runtime.v1.GCSObject 18, // 4: rill.runtime.v1.OLAPListTablesResponse.tables:type_name -> rill.runtime.v1.TableInfo - 1, // 5: rill.runtime.v1.ConnectorService.S3ListBuckets:input_type -> rill.runtime.v1.S3ListBucketsRequest - 3, // 6: rill.runtime.v1.ConnectorService.S3ListObjects:input_type -> rill.runtime.v1.S3ListObjectsRequest - 5, // 7: rill.runtime.v1.ConnectorService.S3GetBucketMetadata:input_type -> rill.runtime.v1.S3GetBucketMetadataRequest - 7, // 8: rill.runtime.v1.ConnectorService.S3GetCredentialsInfo:input_type -> rill.runtime.v1.S3GetCredentialsInfoRequest - 10, // 9: rill.runtime.v1.ConnectorService.GCSListBuckets:input_type -> rill.runtime.v1.GCSListBucketsRequest - 12, // 10: rill.runtime.v1.ConnectorService.GCSListObjects:input_type -> rill.runtime.v1.GCSListObjectsRequest - 14, // 11: rill.runtime.v1.ConnectorService.GCSGetCredentialsInfo:input_type -> rill.runtime.v1.GCSGetCredentialsInfoRequest - 16, // 12: rill.runtime.v1.ConnectorService.OLAPListTables:input_type -> rill.runtime.v1.OLAPListTablesRequest - 19, // 13: rill.runtime.v1.ConnectorService.BigQueryListDatasets:input_type -> rill.runtime.v1.BigQueryListDatasetsRequest - 21, // 14: rill.runtime.v1.ConnectorService.BigQueryListTables:input_type -> rill.runtime.v1.BigQueryListTablesRequest - 2, // 15: rill.runtime.v1.ConnectorService.S3ListBuckets:output_type -> rill.runtime.v1.S3ListBucketsResponse - 4, // 16: rill.runtime.v1.ConnectorService.S3ListObjects:output_type -> rill.runtime.v1.S3ListObjectsResponse - 6, // 17: rill.runtime.v1.ConnectorService.S3GetBucketMetadata:output_type -> rill.runtime.v1.S3GetBucketMetadataResponse - 8, // 18: rill.runtime.v1.ConnectorService.S3GetCredentialsInfo:output_type -> rill.runtime.v1.S3GetCredentialsInfoResponse - 11, // 19: rill.runtime.v1.ConnectorService.GCSListBuckets:output_type -> rill.runtime.v1.GCSListBucketsResponse - 13, // 20: rill.runtime.v1.ConnectorService.GCSListObjects:output_type -> rill.runtime.v1.GCSListObjectsResponse - 15, // 21: rill.runtime.v1.ConnectorService.GCSGetCredentialsInfo:output_type -> 
rill.runtime.v1.GCSGetCredentialsInfoResponse - 17, // 22: rill.runtime.v1.ConnectorService.OLAPListTables:output_type -> rill.runtime.v1.OLAPListTablesResponse - 20, // 23: rill.runtime.v1.ConnectorService.BigQueryListDatasets:output_type -> rill.runtime.v1.BigQueryListDatasetsResponse - 22, // 24: rill.runtime.v1.ConnectorService.BigQueryListTables:output_type -> rill.runtime.v1.BigQueryListTablesResponse - 15, // [15:25] is the sub-list for method output_type - 5, // [5:15] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 25, // 5: rill.runtime.v1.ScanConnectorsResponse.connectors:type_name -> rill.runtime.v1.ScannedConnector + 23, // 6: rill.runtime.v1.ConnectorService.ScanConnectors:input_type -> rill.runtime.v1.ScanConnectorsRequest + 1, // 7: rill.runtime.v1.ConnectorService.S3ListBuckets:input_type -> rill.runtime.v1.S3ListBucketsRequest + 3, // 8: rill.runtime.v1.ConnectorService.S3ListObjects:input_type -> rill.runtime.v1.S3ListObjectsRequest + 5, // 9: rill.runtime.v1.ConnectorService.S3GetBucketMetadata:input_type -> rill.runtime.v1.S3GetBucketMetadataRequest + 7, // 10: rill.runtime.v1.ConnectorService.S3GetCredentialsInfo:input_type -> rill.runtime.v1.S3GetCredentialsInfoRequest + 10, // 11: rill.runtime.v1.ConnectorService.GCSListBuckets:input_type -> rill.runtime.v1.GCSListBucketsRequest + 12, // 12: rill.runtime.v1.ConnectorService.GCSListObjects:input_type -> rill.runtime.v1.GCSListObjectsRequest + 14, // 13: rill.runtime.v1.ConnectorService.GCSGetCredentialsInfo:input_type -> rill.runtime.v1.GCSGetCredentialsInfoRequest + 16, // 14: rill.runtime.v1.ConnectorService.OLAPListTables:input_type -> rill.runtime.v1.OLAPListTablesRequest + 19, // 15: rill.runtime.v1.ConnectorService.BigQueryListDatasets:input_type -> rill.runtime.v1.BigQueryListDatasetsRequest + 21, // 16: rill.runtime.v1.ConnectorService.BigQueryListTables:input_type -> rill.runtime.v1.BigQueryListTablesRequest + 24, // 17: rill.runtime.v1.ConnectorService.ScanConnectors:output_type -> rill.runtime.v1.ScanConnectorsResponse + 2, // 18: rill.runtime.v1.ConnectorService.S3ListBuckets:output_type -> rill.runtime.v1.S3ListBucketsResponse + 4, // 19: rill.runtime.v1.ConnectorService.S3ListObjects:output_type -> rill.runtime.v1.S3ListObjectsResponse + 6, // 20: rill.runtime.v1.ConnectorService.S3GetBucketMetadata:output_type -> rill.runtime.v1.S3GetBucketMetadataResponse + 8, // 21: rill.runtime.v1.ConnectorService.S3GetCredentialsInfo:output_type -> rill.runtime.v1.S3GetCredentialsInfoResponse + 11, // 22: rill.runtime.v1.ConnectorService.GCSListBuckets:output_type -> rill.runtime.v1.GCSListBucketsResponse + 13, // 23: rill.runtime.v1.ConnectorService.GCSListObjects:output_type -> rill.runtime.v1.GCSListObjectsResponse + 15, // 24: rill.runtime.v1.ConnectorService.GCSGetCredentialsInfo:output_type -> rill.runtime.v1.GCSGetCredentialsInfoResponse + 17, // 25: rill.runtime.v1.ConnectorService.OLAPListTables:output_type -> rill.runtime.v1.OLAPListTablesResponse + 20, // 26: rill.runtime.v1.ConnectorService.BigQueryListDatasets:output_type -> rill.runtime.v1.BigQueryListDatasetsResponse + 22, // 27: rill.runtime.v1.ConnectorService.BigQueryListTables:output_type -> rill.runtime.v1.BigQueryListTablesResponse + 17, // [17:28] is the sub-list for method output_type + 6, // [6:17] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, 
// [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name } func init() { file_rill_runtime_v1_connectors_proto_init() } @@ -2148,6 +2335,42 @@ func file_rill_runtime_v1_connectors_proto_init() { return nil } } + file_rill_runtime_v1_connectors_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScanConnectorsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rill_runtime_v1_connectors_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScanConnectorsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rill_runtime_v1_connectors_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScannedConnector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -2155,7 +2378,7 @@ func file_rill_runtime_v1_connectors_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_rill_runtime_v1_connectors_proto_rawDesc, NumEnums: 0, - NumMessages: 23, + NumMessages: 26, NumExtensions: 0, NumServices: 1, }, diff --git a/proto/gen/rill/runtime/v1/connectors.pb.gw.go b/proto/gen/rill/runtime/v1/connectors.pb.gw.go index fc2a1430d9a..4d019602f01 100644 --- a/proto/gen/rill/runtime/v1/connectors.pb.gw.go +++ b/proto/gen/rill/runtime/v1/connectors.pb.gw.go @@ -31,6 +31,42 @@ var _ = runtime.String var _ = utilities.NewDoubleArray var _ = metadata.Join +var ( + filter_ConnectorService_ScanConnectors_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_ConnectorService_ScanConnectors_0(ctx context.Context, marshaler runtime.Marshaler, client ConnectorServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ScanConnectorsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ConnectorService_ScanConnectors_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ScanConnectors(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ConnectorService_ScanConnectors_0(ctx context.Context, marshaler runtime.Marshaler, server ConnectorServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ScanConnectorsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ConnectorService_ScanConnectors_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ScanConnectors(ctx, &protoReq) + return msg, metadata, err + +} + var ( filter_ConnectorService_S3ListBuckets_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} ) @@ -499,6 +535,31 @@ func 
local_request_ConnectorService_BigQueryListTables_0(ctx context.Context, ma // Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterConnectorServiceHandlerFromEndpoint instead. func RegisterConnectorServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ConnectorServiceServer) error { + mux.Handle("GET", pattern_ConnectorService_ScanConnectors_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/rill.runtime.v1.ConnectorService/ScanConnectors", runtime.WithHTTPPathPattern("/v1/connectors/scan")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ConnectorService_ScanConnectors_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ConnectorService_ScanConnectors_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_ConnectorService_S3ListBuckets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -790,6 +851,28 @@ func RegisterConnectorServiceHandler(ctx context.Context, mux *runtime.ServeMux, // "ConnectorServiceClient" to call the correct interceptors. func RegisterConnectorServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ConnectorServiceClient) error { + mux.Handle("GET", pattern_ConnectorService_ScanConnectors_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/rill.runtime.v1.ConnectorService/ScanConnectors", runtime.WithHTTPPathPattern("/v1/connectors/scan")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ConnectorService_ScanConnectors_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ConnectorService_ScanConnectors_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_ConnectorService_S3ListBuckets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -1014,6 +1097,8 @@ func RegisterConnectorServiceHandlerClient(ctx context.Context, mux *runtime.Ser } var ( + pattern_ConnectorService_ScanConnectors_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "connectors", "scan"}, "")) + pattern_ConnectorService_S3ListBuckets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "s3", "buckets"}, "")) pattern_ConnectorService_S3ListObjects_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1", "s3", "bucket", "objects"}, "")) @@ -1036,6 +1121,8 @@ var ( ) var ( + forward_ConnectorService_ScanConnectors_0 = runtime.ForwardResponseMessage + forward_ConnectorService_S3ListBuckets_0 = runtime.ForwardResponseMessage forward_ConnectorService_S3ListObjects_0 = runtime.ForwardResponseMessage diff --git a/proto/gen/rill/runtime/v1/connectors.pb.validate.go b/proto/gen/rill/runtime/v1/connectors.pb.validate.go index 703380b4af8..209df01af34 100644 --- a/proto/gen/rill/runtime/v1/connectors.pb.validate.go +++ b/proto/gen/rill/runtime/v1/connectors.pb.validate.go @@ -2756,3 +2756,349 @@ var _ interface { Cause() error ErrorName() string } = BigQueryListTablesResponseValidationError{} + +// Validate checks the field values on ScanConnectorsRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScanConnectorsRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScanConnectorsRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScanConnectorsRequestMultiError, or nil if none found. +func (m *ScanConnectorsRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *ScanConnectorsRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for InstanceId + + if len(errors) > 0 { + return ScanConnectorsRequestMultiError(errors) + } + + return nil +} + +// ScanConnectorsRequestMultiError is an error wrapping multiple validation +// errors returned by ScanConnectorsRequest.ValidateAll() if the designated +// constraints aren't met. +type ScanConnectorsRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScanConnectorsRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScanConnectorsRequestMultiError) AllErrors() []error { return m } + +// ScanConnectorsRequestValidationError is the validation error returned by +// ScanConnectorsRequest.Validate if the designated constraints aren't met. +type ScanConnectorsRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScanConnectorsRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e ScanConnectorsRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScanConnectorsRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScanConnectorsRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScanConnectorsRequestValidationError) ErrorName() string { + return "ScanConnectorsRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e ScanConnectorsRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScanConnectorsRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScanConnectorsRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScanConnectorsRequestValidationError{} + +// Validate checks the field values on ScanConnectorsResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScanConnectorsResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScanConnectorsResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScanConnectorsResponseMultiError, or nil if none found. +func (m *ScanConnectorsResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *ScanConnectorsResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetConnectors() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScanConnectorsResponseValidationError{ + field: fmt.Sprintf("Connectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScanConnectorsResponseValidationError{ + field: fmt.Sprintf("Connectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScanConnectorsResponseValidationError{ + field: fmt.Sprintf("Connectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ScanConnectorsResponseMultiError(errors) + } + + return nil +} + +// ScanConnectorsResponseMultiError is an error wrapping multiple validation +// errors returned by ScanConnectorsResponse.ValidateAll() if the designated +// constraints aren't met. +type ScanConnectorsResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScanConnectorsResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ScanConnectorsResponseMultiError) AllErrors() []error { return m } + +// ScanConnectorsResponseValidationError is the validation error returned by +// ScanConnectorsResponse.Validate if the designated constraints aren't met. +type ScanConnectorsResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScanConnectorsResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScanConnectorsResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScanConnectorsResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScanConnectorsResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScanConnectorsResponseValidationError) ErrorName() string { + return "ScanConnectorsResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e ScanConnectorsResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScanConnectorsResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScanConnectorsResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScanConnectorsResponseValidationError{} + +// Validate checks the field values on ScannedConnector with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ScannedConnector) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScannedConnector with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScannedConnectorMultiError, or nil if none found. +func (m *ScannedConnector) ValidateAll() error { + return m.validate(true) +} + +func (m *ScannedConnector) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for Type + + // no validation rules for HasAnonymousAccess + + if len(errors) > 0 { + return ScannedConnectorMultiError(errors) + } + + return nil +} + +// ScannedConnectorMultiError is an error wrapping multiple validation errors +// returned by ScannedConnector.ValidateAll() if the designated constraints +// aren't met. +type ScannedConnectorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScannedConnectorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScannedConnectorMultiError) AllErrors() []error { return m } + +// ScannedConnectorValidationError is the validation error returned by +// ScannedConnector.Validate if the designated constraints aren't met. +type ScannedConnectorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ScannedConnectorValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScannedConnectorValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScannedConnectorValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScannedConnectorValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScannedConnectorValidationError) ErrorName() string { return "ScannedConnectorValidationError" } + +// Error satisfies the builtin error interface +func (e ScannedConnectorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScannedConnector.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScannedConnectorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScannedConnectorValidationError{} diff --git a/proto/gen/rill/runtime/v1/connectors_grpc.pb.go b/proto/gen/rill/runtime/v1/connectors_grpc.pb.go index ff27fb23b0d..d496778ab25 100644 --- a/proto/gen/rill/runtime/v1/connectors_grpc.pb.go +++ b/proto/gen/rill/runtime/v1/connectors_grpc.pb.go @@ -19,6 +19,7 @@ import ( const _ = grpc.SupportPackageIsVersion7 const ( + ConnectorService_ScanConnectors_FullMethodName = "/rill.runtime.v1.ConnectorService/ScanConnectors" ConnectorService_S3ListBuckets_FullMethodName = "/rill.runtime.v1.ConnectorService/S3ListBuckets" ConnectorService_S3ListObjects_FullMethodName = "/rill.runtime.v1.ConnectorService/S3ListObjects" ConnectorService_S3GetBucketMetadata_FullMethodName = "/rill.runtime.v1.ConnectorService/S3GetBucketMetadata" @@ -35,6 +36,10 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type ConnectorServiceClient interface { + // ScanConnectors scans the artifacts for connectors and returns information about + // the connectors referenced in the artifacts. The information includes name,type and + // credentials for the connector. + ScanConnectors(ctx context.Context, in *ScanConnectorsRequest, opts ...grpc.CallOption) (*ScanConnectorsResponse, error) // S3ListBuckets lists buckets accessible with the configured credentials. S3ListBuckets(ctx context.Context, in *S3ListBucketsRequest, opts ...grpc.CallOption) (*S3ListBucketsResponse, error) // S3ListBuckets lists objects for the given bucket. @@ -65,6 +70,15 @@ func NewConnectorServiceClient(cc grpc.ClientConnInterface) ConnectorServiceClie return &connectorServiceClient{cc} } +func (c *connectorServiceClient) ScanConnectors(ctx context.Context, in *ScanConnectorsRequest, opts ...grpc.CallOption) (*ScanConnectorsResponse, error) { + out := new(ScanConnectorsResponse) + err := c.cc.Invoke(ctx, ConnectorService_ScanConnectors_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *connectorServiceClient) S3ListBuckets(ctx context.Context, in *S3ListBucketsRequest, opts ...grpc.CallOption) (*S3ListBucketsResponse, error) { out := new(S3ListBucketsResponse) err := c.cc.Invoke(ctx, ConnectorService_S3ListBuckets_FullMethodName, in, out, opts...) 
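For context on the newly generated client surface, here is a minimal sketch of invoking ScanConnectors through the generated ConnectorServiceClient. It is illustrative only: the dial address, the use of insecure credentials, and the "default" instance ID are placeholders, and the runtimev1 import path is assumed from the generated package location in this repository.

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed import path for the generated package shown in this diff.
	runtimev1 "github.com/rilldata/rill/proto/gen/rill/runtime/v1"
)

func main() {
	// Placeholder address of a runtime exposing the gRPC ConnectorService.
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := runtimev1.NewConnectorServiceClient(conn)

	// ScanConnectors walks the project artifacts and reports each referenced
	// connector's name, type, and whether it is usable without credentials.
	resp, err := client.ScanConnectors(context.Background(), &runtimev1.ScanConnectorsRequest{InstanceId: "default"})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range resp.GetConnectors() {
		fmt.Printf("%s (%s), anonymous access: %v\n", c.GetName(), c.GetType(), c.GetHasAnonymousAccess())
	}
}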
@@ -159,6 +173,10 @@ func (c *connectorServiceClient) BigQueryListTables(ctx context.Context, in *Big // All implementations must embed UnimplementedConnectorServiceServer // for forward compatibility type ConnectorServiceServer interface { + // ScanConnectors scans the artifacts for connectors and returns information about + // the connectors referenced in the artifacts. The information includes name,type and + // credentials for the connector. + ScanConnectors(context.Context, *ScanConnectorsRequest) (*ScanConnectorsResponse, error) // S3ListBuckets lists buckets accessible with the configured credentials. S3ListBuckets(context.Context, *S3ListBucketsRequest) (*S3ListBucketsResponse, error) // S3ListBuckets lists objects for the given bucket. @@ -186,6 +204,9 @@ type ConnectorServiceServer interface { type UnimplementedConnectorServiceServer struct { } +func (UnimplementedConnectorServiceServer) ScanConnectors(context.Context, *ScanConnectorsRequest) (*ScanConnectorsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ScanConnectors not implemented") +} func (UnimplementedConnectorServiceServer) S3ListBuckets(context.Context, *S3ListBucketsRequest) (*S3ListBucketsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method S3ListBuckets not implemented") } @@ -229,6 +250,24 @@ func RegisterConnectorServiceServer(s grpc.ServiceRegistrar, srv ConnectorServic s.RegisterService(&ConnectorService_ServiceDesc, srv) } +func _ConnectorService_ScanConnectors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ScanConnectorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConnectorServiceServer).ScanConnectors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ConnectorService_ScanConnectors_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConnectorServiceServer).ScanConnectors(ctx, req.(*ScanConnectorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _ConnectorService_S3ListBuckets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(S3ListBucketsRequest) if err := dec(in); err != nil { @@ -416,6 +455,10 @@ var ConnectorService_ServiceDesc = grpc.ServiceDesc{ ServiceName: "rill.runtime.v1.ConnectorService", HandlerType: (*ConnectorServiceServer)(nil), Methods: []grpc.MethodDesc{ + { + MethodName: "ScanConnectors", + Handler: _ConnectorService_ScanConnectors_Handler, + }, { MethodName: "S3ListBuckets", Handler: _ConnectorService_S3ListBuckets_Handler, diff --git a/proto/gen/rill/runtime/v1/runtime.swagger.yaml b/proto/gen/rill/runtime/v1/runtime.swagger.yaml index e1b4e1baf19..f517b26e096 100644 --- a/proto/gen/rill/runtime/v1/runtime.swagger.yaml +++ b/proto/gen/rill/runtime/v1/runtime.swagger.yaml @@ -98,6 +98,29 @@ paths: $ref: '#/definitions/rpcStatus' tags: - RuntimeService + /v1/connectors/scan: + get: + summary: |- + ScanConnectors scans the artifacts for connectors and returns information about + the connectors referenced in the artifacts. The information includes name,type and + credentials for the connector. + operationId: ConnectorService_ScanConnectors + responses: + "200": + description: A successful response. 
+ schema: + $ref: '#/definitions/v1ScanConnectorsResponse' + default: + description: An unexpected error response. + schema: + $ref: '#/definitions/rpcStatus' + parameters: + - name: instanceId + in: query + required: false + type: string + tags: + - ConnectorService /v1/delete-and-reconcile: post: summary: DeleteFileAndReconcile combines RenameFile and Reconcile in a single endpoint to reduce latency. @@ -4476,6 +4499,24 @@ definitions: format: int64 isDir: type: boolean + v1ScanConnectorsResponse: + type: object + properties: + connectors: + type: array + items: + type: object + $ref: '#/definitions/v1ScannedConnector' + v1ScannedConnector: + type: object + properties: + name: + type: string + type: + type: string + hasAnonymousAccess: + type: boolean + title: reports whether access is present without any credentials v1Schedule: type: object properties: diff --git a/proto/rill/runtime/v1/connectors.proto b/proto/rill/runtime/v1/connectors.proto index 2526875890f..b778776868b 100644 --- a/proto/rill/runtime/v1/connectors.proto +++ b/proto/rill/runtime/v1/connectors.proto @@ -6,6 +6,13 @@ import "google/protobuf/timestamp.proto"; import "validate/validate.proto"; service ConnectorService { + // ScanConnectors scans the artifacts for connectors and returns information about + // the connectors referenced in the artifacts. The information includes name,type and + // credentials for the connector. + rpc ScanConnectors(ScanConnectorsRequest) returns (ScanConnectorsResponse) { + option (google.api.http) = {get: "/v1/connectors/scan"}; + } + // S3ListBuckets lists buckets accessible with the configured credentials. rpc S3ListBuckets(S3ListBucketsRequest) returns (S3ListBucketsResponse) { option (google.api.http) = {get: "/v1/s3/buckets"}; @@ -197,3 +204,17 @@ message BigQueryListTablesResponse { string next_page_token = 1; repeated string names = 2; } + +message ScanConnectorsRequest { + string instance_id = 1; +} + +message ScanConnectorsResponse { + repeated ScannedConnector connectors = 1; +} + +message ScannedConnector { + string name = 1; + string type = 2; + bool has_anonymous_access = 3; // reports whether access is present without any credentials +} \ No newline at end of file diff --git a/runtime/catalog_cache.go b/runtime/catalog_cache.go index 6e22cf5d16e..da296b17891 100644 --- a/runtime/catalog_cache.go +++ b/runtime/catalog_cache.go @@ -91,7 +91,7 @@ func (c *catalogCache) close(ctx context.Context) error { // Unlike other catalog functions, it is safe to call flush concurrently with calls to get and list (i.e. under a read lock). func (c *catalogCache) flush(ctx context.Context) error { for s, n := range c.dirty { - r, err := c.get(n, true) + r, err := c.get(n, true, false) if err != nil { if !errors.Is(err, drivers.ErrResourceNotFound) { return fmt.Errorf("flush: unexpected error from get: %w", err) @@ -135,19 +135,45 @@ func (c *catalogCache) checkLeader(ctx context.Context) error { return nil } +// get returns a resource from the catalog. +// Unlike other catalog functions, it is safe to call get concurrently with calls to list and flush (i.e. under a read lock). 
+func (c *catalogCache) get(n *runtimev1.ResourceName, withDeleted, clone bool) (*runtimev1.Resource, error) { + rs := c.resources[n.Kind] + if rs == nil { + return nil, drivers.ErrResourceNotFound + } + r, ok := rs[strings.ToLower(n.Name)] + if !ok { + return nil, drivers.ErrResourceNotFound + } + if r.Meta.DeletedOn != nil && !withDeleted { + return nil, drivers.ErrResourceNotFound + } + if clone { + return c.clone(r), nil + } + return r, nil +} + // list returns a list of resources in the catalog. // Unlike other catalog functions, it is safe to call list concurrently with calls to get and flush (i.e. under a read lock). -func (c *catalogCache) list(kind string, withDeleted bool) ([]*runtimev1.Resource, error) { +func (c *catalogCache) list(kind string, withDeleted, clone bool) ([]*runtimev1.Resource, error) { if kind != "" { n := len(c.resources[kind]) res := make([]*runtimev1.Resource, 0, n) if withDeleted { for _, r := range c.resources[kind] { + if clone { + r = c.clone(r) + } res = append(res, r) } } else { for _, r := range c.resources[kind] { if r.Meta.DeletedOn == nil { + if clone { + r = c.clone(r) + } res = append(res, r) } } @@ -165,6 +191,9 @@ func (c *catalogCache) list(kind string, withDeleted bool) ([]*runtimev1.Resourc if withDeleted { for _, rs := range c.resources { for _, r := range rs { + if clone { + r = c.clone(r) + } res = append(res, r) } } @@ -172,6 +201,9 @@ func (c *catalogCache) list(kind string, withDeleted bool) ([]*runtimev1.Resourc for _, rs := range c.resources { for _, r := range rs { if r.Meta.DeletedOn == nil { + if clone { + r = c.clone(r) + } res = append(res, r) } } @@ -181,28 +213,12 @@ func (c *catalogCache) list(kind string, withDeleted bool) ([]*runtimev1.Resourc return res, nil } -// get returns a resource from the catalog. -// Unlike other catalog functions, it is safe to call get concurrently with calls to list and flush (i.e. under a read lock). -func (c *catalogCache) get(n *runtimev1.ResourceName, withDeleted bool) (*runtimev1.Resource, error) { - rs := c.resources[n.Kind] - if rs == nil { - return nil, drivers.ErrResourceNotFound - } - r, ok := rs[strings.ToLower(n.Name)] - if !ok { - return nil, drivers.ErrResourceNotFound - } - if r.Meta.DeletedOn != nil && !withDeleted { - return nil, drivers.ErrResourceNotFound - } - return r, nil -} - // create creates a resource in the catalog. // It will error if a resource with the same name already exists. // If a soft-deleted resource exists with the same name, it will be overwritten (no longer deleted). +// The passed resource should only have its spec populated. The meta and state fields will be populated by this function. 
func (c *catalogCache) create(name *runtimev1.ResourceName, refs []*runtimev1.ResourceName, owner *runtimev1.ResourceName, paths []string, r *runtimev1.Resource) error { - existing, _ := c.get(name, true) + existing, _ := c.get(name, true, false) if existing != nil { if existing.Meta.DeletedOn == nil { return drivers.ErrResourceAlreadyExists @@ -222,6 +238,10 @@ func (c *catalogCache) create(name *runtimev1.ResourceName, refs []*runtimev1.Re r.Meta.Version = existing.Meta.Version + 1 r.Meta.SpecVersion = existing.Meta.SpecVersion + 1 } + err := c.ctrl.reconciler(name.Kind).ResetState(r) + if err != nil { + return err + } c.link(r) c.dirty[nameStr(r.Meta.Name)] = r.Meta.Name c.addEvent(name, r, runtimev1.ResourceEvent_RESOURCE_EVENT_WRITE) @@ -230,7 +250,7 @@ func (c *catalogCache) create(name *runtimev1.ResourceName, refs []*runtimev1.Re // rename renames a resource in the catalog and sets the r.Meta.RenamedFrom field. func (c *catalogCache) rename(name, newName *runtimev1.ResourceName) error { - r, err := c.get(name, false) + r, err := c.get(name, false, false) if err != nil { return err } @@ -250,7 +270,7 @@ func (c *catalogCache) rename(name, newName *runtimev1.ResourceName) error { // clearRenamedFrom clears the r.Meta.RenamedFrom field without bumping version numbers. func (c *catalogCache) clearRenamedFrom(name *runtimev1.ResourceName) error { - r, err := c.get(name, false) + r, err := c.get(name, false, false) if err != nil { return err } @@ -267,7 +287,7 @@ func (c *catalogCache) clearRenamedFrom(name *runtimev1.ResourceName) error { // updateMeta updates the meta fields of a resource. func (c *catalogCache) updateMeta(name *runtimev1.ResourceName, refs []*runtimev1.ResourceName, owner *runtimev1.ResourceName, paths []string) error { - r, err := c.get(name, false) + r, err := c.get(name, false, false) if err != nil { return err } @@ -285,13 +305,13 @@ func (c *catalogCache) updateMeta(name *runtimev1.ResourceName, refs []*runtimev } // updateSpec updates the spec field of a resource. +// It uses the spec from the passed resource and disregards its other fields. func (c *catalogCache) updateSpec(name *runtimev1.ResourceName, from *runtimev1.Resource) error { - r, err := c.get(name, false) + r, err := c.get(name, false, false) if err != nil { return err } // NOTE: No need to unlink/link because no indexed fields are edited. - err = c.ctrl.reconciler(name.Kind).AssignSpec(from, r) if err != nil { return err @@ -305,8 +325,9 @@ func (c *catalogCache) updateSpec(name *runtimev1.ResourceName, from *runtimev1. } // updateState updates the state field of a resource. +// It uses the state from the passed resource and disregards its other fields. func (c *catalogCache) updateState(name *runtimev1.ResourceName, from *runtimev1.Resource) error { - r, err := c.get(name, false) + r, err := c.get(name, false, false) if err != nil { return err } @@ -325,7 +346,7 @@ func (c *catalogCache) updateState(name *runtimev1.ResourceName, from *runtimev1 // updateError updates the reconcile_error field of a resource. func (c *catalogCache) updateError(name *runtimev1.ResourceName, reconcileErr error) error { - r, err := c.get(name, false) + r, err := c.get(name, false, false) if err != nil { return err } @@ -342,7 +363,7 @@ func (c *catalogCache) updateError(name *runtimev1.ResourceName, reconcileErr er // updateDeleted sets the deleted_on field of a resource (a soft delete). // Afterwards, the resource can still be accessed by passing withDeleted to the getters. 
func (c *catalogCache) updateDeleted(name *runtimev1.ResourceName) error { - r, err := c.get(name, false) + r, err := c.get(name, false, false) if err != nil { return err } @@ -360,7 +381,7 @@ func (c *catalogCache) updateDeleted(name *runtimev1.ResourceName) error { // updateStatus updates the ephemeral status fields on a resource. // The values of these fields are reset next time a catalog cache is created. func (c *catalogCache) updateStatus(name *runtimev1.ResourceName, status runtimev1.ReconcileStatus, reconcileOn time.Time) error { - r, err := c.get(name, true) + r, err := c.get(name, true, false) if err != nil { return err } @@ -377,7 +398,7 @@ func (c *catalogCache) updateStatus(name *runtimev1.ResourceName, status runtime // delete permanently deletes a resource from the catalog (a hard delete). // Afterwards, the resource can no longer be accessed. func (c *catalogCache) delete(name *runtimev1.ResourceName) error { - r, err := c.get(name, true) + r, err := c.get(name, true, false) if err != nil { return err } @@ -424,12 +445,17 @@ func (c *catalogCache) unlink(r *runtimev1.Resource) { delete(c.renamed, s) } +// clone clones a resource such that it is safe to mutate without affecting a cached resource. +func (c *catalogCache) clone(r *runtimev1.Resource) *runtimev1.Resource { + return proto.Clone(r).(*runtimev1.Resource) +} + // retryCyclicRefs attempts to re-link resources into the DAG that were previously rejected due to cyclic references. // It returns a list of resource names that were successfully linked into the DAG. func (c *catalogCache) retryCyclicRefs() []*runtimev1.ResourceName { var res []*runtimev1.ResourceName for s, n := range c.cyclic { - r, err := c.get(n, false) + r, err := c.get(n, false, false) if err != nil { panic(err) } diff --git a/runtime/compilers/rillv1/parse_dotenv.go b/runtime/compilers/rillv1/parse_dotenv.go new file mode 100644 index 00000000000..92a5befde02 --- /dev/null +++ b/runtime/compilers/rillv1/parse_dotenv.go @@ -0,0 +1,26 @@ +package rillv1 + +import ( + "context" + "os" + + "github.com/joho/godotenv" +) + +// parseDotEnv parses the env file present at repo root +func (p *Parser) parseDotEnv(ctx context.Context, path string) error { + data, err := p.Repo.Get(ctx, path) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + envMap, err := godotenv.Unmarshal(data) + if err != nil { + return err + } + + p.DotEnv = envMap + return nil +} diff --git a/runtime/compilers/rillv1/parser.go b/runtime/compilers/rillv1/parser.go index 5d87f9a58fc..f68c631933a 100644 --- a/runtime/compilers/rillv1/parser.go +++ b/runtime/compilers/rillv1/parser.go @@ -108,6 +108,7 @@ type Diff struct { Added []ResourceName Modified []ResourceName ModifiedRillYAML bool + ModifiedDotEnv bool Deleted []ResourceName } @@ -122,6 +123,7 @@ type Parser struct { // Output RillYAML *RillYAML + DotEnv map[string]string Resources map[ResourceName]*Resource Errors []*runtimev1.ParseError @@ -208,6 +210,8 @@ func (p *Parser) Reparse(ctx context.Context, paths []string) (*Diff, error) { var deletedResources []*Resource // Resources deleted in Phase 1 (some may be added back in Phase 2) checkPaths := slices.Clone(paths) // Paths we should visit in the loop seenPaths := make(map[string]bool) // Paths already visited by the loop + modifiedRillYAML := false // whether rill.yaml file was modified + modifiedDotEnv := false // whether .env file was modified for i := 0; i < len(checkPaths); i++ { // Don't check the same path twice path := normalizePath(checkPaths[i]) 
@@ -216,10 +220,11 @@ func (p *Parser) Reparse(ctx context.Context, paths []string) (*Diff, error) { } seenPaths[path] = true - // Skip files that aren't SQL or YAML + // Skip files that aren't SQL or YAML or .env file isSQL := strings.HasSuffix(path, ".sql") isYAML := strings.HasSuffix(path, ".yaml") || strings.HasSuffix(path, ".yml") - if !isSQL && !isYAML { + isDotEnv := strings.EqualFold(path, "/.env") + if !isSQL && !isYAML && !isDotEnv { continue } @@ -231,9 +236,13 @@ func (p *Parser) Reparse(ctx context.Context, paths []string) (*Diff, error) { return nil, fmt.Errorf("unexpected file stat error: %w", err) } - // Check if path is rill.yaml and clear it (so we can re-parse it) + // Check if path is rill.yaml or .env and clear it (so we can re-parse it) if path == "/rill.yaml" || path == "/rill.yml" { + modifiedRillYAML = true p.RillYAML = nil + } else if path == "/.env" { + modifiedDotEnv = true + p.DotEnv = nil } // Since .sql and .yaml files provide context for each other, if one was modified, we need to reparse both. @@ -273,9 +282,6 @@ func (p *Parser) Reparse(ctx context.Context, paths []string) (*Diff, error) { } } - // Capture if rill.yaml will be updated - modifiedRillYAML := p.RillYAML == nil - // Phase 2: Parse (or reparse) the related paths, adding back resources err := p.parsePaths(ctx, parsePaths) if err != nil { @@ -318,7 +324,10 @@ func (p *Parser) Reparse(ctx context.Context, paths []string) (*Diff, error) { } // Phase 3: Build the diff using p.insertedResources, p.updatedResources and deletedResources - diff := &Diff{ModifiedRillYAML: modifiedRillYAML} + diff := &Diff{ + ModifiedRillYAML: modifiedRillYAML, + ModifiedDotEnv: modifiedDotEnv, + } for _, resource := range p.insertedResources { addedBack := false for _, deleted := range deletedResources { @@ -360,7 +369,7 @@ func (p *Parser) parsePaths(ctx context.Context, paths []string) error { // Then iterate over the sorted paths, processing all paths with the same stem at once (stem = path without extension). slices.Sort(paths) for i := 0; i < len(paths); { - // Handle rill.yaml separately (if parsing of rill.yaml fails, we exit early instead of adding a ParseError) + // Handle rill.yaml and .env separately (if parsing of rill.yaml fails, we exit early instead of adding a ParseError) path := paths[i] if path == "/rill.yaml" || path == "/rill.yml" { err := p.parseRillYAML(ctx, path) @@ -369,6 +378,13 @@ func (p *Parser) parsePaths(ctx context.Context, paths []string) error { } i++ continue + } else if path == "/.env" { + err := p.parseDotEnv(ctx, path) + if err != nil { + p.addParseError(path, err) + } + i++ + continue } // Identify the range of paths with the same stem as paths[i] diff --git a/runtime/controller.go b/runtime/controller.go index 4d5af6a078c..0ccbd66586a 100644 --- a/runtime/controller.go +++ b/runtime/controller.go @@ -39,6 +39,7 @@ type Reconciler interface { Close(ctx context.Context) error AssignSpec(from, to *runtimev1.Resource) error AssignState(from, to *runtimev1.Resource) error + ResetState(r *runtimev1.Resource) error Reconcile(ctx context.Context, n *runtimev1.ResourceName) ReconcileResult } @@ -283,21 +284,21 @@ func (c *Controller) Run(ctx context.Context) error { // Get returns a resource by name. // Soft-deleted resources (i.e. resources where DeletedOn != nil) are not returned. 
-func (c *Controller) Get(ctx context.Context, name *runtimev1.ResourceName) (*runtimev1.Resource, error) { +func (c *Controller) Get(ctx context.Context, name *runtimev1.ResourceName, clone bool) (*runtimev1.Resource, error) { c.checkRunning() c.lock(ctx, true) defer c.unlock(ctx, true) - return c.catalog.get(name, false) + return c.catalog.get(name, false, clone) } // List returns a list of resources of the specified kind. // If kind is empty, all resources are returned. // Soft-deleted resources (i.e. resources where DeletedOn != nil) are not returned. -func (c *Controller) List(ctx context.Context, kind string) ([]*runtimev1.Resource, error) { +func (c *Controller) List(ctx context.Context, kind string, clone bool) ([]*runtimev1.Resource, error) { c.checkRunning() c.lock(ctx, true) defer c.unlock(ctx, true) - return c.catalog.list(kind, false) + return c.catalog.list(kind, false, clone) } // SubscribeCallback is the callback type passed to Subscribe. @@ -343,7 +344,7 @@ func (c *Controller) Create(ctx context.Context, name *runtimev1.ResourceName, r // A deleted resource with the same name may exist and be running. If so, we first cancel it. requeued := false if inv, ok := c.invocations[nameStr(name)]; ok { - r, err := c.catalog.get(name, true) + r, err := c.catalog.get(name, true, false) if err != nil { return fmt.Errorf("internal: got catalog error for reconciling resource: %w", err) } @@ -409,7 +410,7 @@ func (c *Controller) UpdateName(ctx context.Context, name, newName, owner *runti c.enqueue(name) } - r, err := c.catalog.get(name, true) + r, err := c.catalog.get(name, true, false) if err != nil { return err } @@ -660,7 +661,7 @@ func (c *Controller) isReconcilerForResource(ctx context.Context, n *runtimev1.R // It does nothing if the resource is not currently being renamed (RenamedFrom == nil). // It must be called while c.mu is held. func (c *Controller) safeMutateRenamed(n *runtimev1.ResourceName) error { - r, err := c.catalog.get(n, true) + r, err := c.catalog.get(n, true, false) if err != nil { if errors.Is(err, drivers.ErrResourceNotFound) { return nil @@ -678,7 +679,7 @@ func (c *Controller) safeMutateRenamed(n *runtimev1.ResourceName) error { return err } - _, err = c.catalog.get(renamedFrom, true) + _, err = c.catalog.get(renamedFrom, true, false) if err == nil { // A new resource with the name of the old one has been created in the mean time, so no delete is necessary (reconciler will bring to desired state). return nil @@ -712,7 +713,7 @@ func (c *Controller) safeRename(from, to *runtimev1.ResourceName) error { // There's a collision if to matches RenamedFrom of another resource. collision := false for _, n := range c.catalog.renamed { - r, err := c.catalog.get(n, true) + r, err := c.catalog.get(n, true, false) if err != nil { return fmt.Errorf("internal: failed to get renamed resource %v: %w", n, err) } @@ -729,7 +730,7 @@ func (c *Controller) safeRename(from, to *runtimev1.ResourceName) error { // Collision, do a create+delete instead of a rename // (since creation might fail if the name is taken, whereas the delete is almost certain to succeed) - r, err := c.catalog.get(from, true) + r, err := c.catalog.get(from, true, false) if err != nil { return err } @@ -803,6 +804,7 @@ func (c *Controller) processQueue() error { } // markPending marks a resource and its descendents as pending. +// It also clears errors on every resource marked pending - it would be confusing to show an old error after a change has been made that may fix it. 
// It returns true if it already now knows that the resource can't be scheduled and will be re-triggered later (e.g. by being added to a waitlist). // It must be called while c.mu is held. func (c *Controller) markPending(n *runtimev1.ResourceName) (bool, error) { @@ -810,7 +812,7 @@ func (c *Controller) markPending(n *runtimev1.ResourceName) (bool, error) { c.timeline.Remove(n) // Get resource - r, err := c.catalog.get(n, true) + r, err := c.catalog.get(n, true, false) if err != nil { if errors.Is(err, drivers.ErrResourceNotFound) { return true, nil @@ -826,7 +828,11 @@ func (c *Controller) markPending(n *runtimev1.ResourceName) (bool, error) { return true, nil } - // Not running - mark pending + // Not running - clear error and mark pending + err = c.catalog.updateError(n, nil) + if err != nil { + return false, err + } err = c.catalog.updateStatus(n, runtimev1.ReconcileStatus_RECONCILE_STATUS_PENDING, time.Time{}) if err != nil { return false, err @@ -849,13 +855,17 @@ func (c *Controller) markPending(n *runtimev1.ResourceName) (bool, error) { // Ensure all descendents get marked pending and cancel any running descendents. descendentRunning := false err = c.catalog.dag.Visit(n, func(ds string, dn *runtimev1.ResourceName) error { - dr, err := c.catalog.get(dn, true) + dr, err := c.catalog.get(dn, true, false) if err != nil { return fmt.Errorf("error getting dag node %q: %w", ds, err) } switch dr.Meta.ReconcileStatus { case runtimev1.ReconcileStatus_RECONCILE_STATUS_IDLE: - // Mark it pending + // Clear error and mark it pending + err = c.catalog.updateError(dn, nil) + if err != nil { + return fmt.Errorf("error updating dag node %q: %w", ds, err) + } err = c.catalog.updateStatus(dn, runtimev1.ReconcileStatus_RECONCILE_STATUS_PENDING, time.Time{}) if err != nil { return fmt.Errorf("error updating dag node %q: %w", ds, err) @@ -904,7 +914,7 @@ func (c *Controller) markPending(n *runtimev1.ResourceName) (bool, error) { // The implementation relies on the key invariant that all resources awaiting to be reconciled have status=pending, *including descendents of a resource with status=pending*. // This is ensured through the assignment of status=pending in markPending. func (c *Controller) trySchedule(n *runtimev1.ResourceName) (bool, error) { - r, err := c.catalog.get(n, true) + r, err := c.catalog.get(n, true, false) if err != nil { if errors.Is(err, drivers.ErrResourceNotFound) { return true, nil @@ -915,7 +925,7 @@ func (c *Controller) trySchedule(n *runtimev1.ResourceName) (bool, error) { // Return true if any parents are pending or running parents := c.catalog.dag.Parents(n, true) for _, pn := range parents { - p, err := c.catalog.get(pn, true) + p, err := c.catalog.get(pn, true, false) if err != nil { return false, fmt.Errorf("internal: error getting present parent %q: %w", nameStr(pn), err) } @@ -999,7 +1009,7 @@ func (c *Controller) invoke(r *runtimev1.Resource) error { // - and, for itself if inv.reschedule is true // - and, for its children in the DAG if inv.reschedule is false func (c *Controller) processCompletedInvocation(inv *invocation) error { - r, err := c.catalog.get(inv.name, true) + r, err := c.catalog.get(inv.name, true, false) if err != nil { return err } @@ -1070,7 +1080,7 @@ func (c *Controller) processCompletedInvocation(inv *invocation) error { // Enqueue items from waitlist that haven't been updated (and hence re-triggered in the meantime).
for _, e := range inv.waitlist { - r, err := c.catalog.get(e.name, true) + r, err := c.catalog.get(e.name, true, false) if err != nil { if errors.Is(err, drivers.ErrResourceNotFound) { continue diff --git a/runtime/drivers/bigquery/sql_store.go b/runtime/drivers/bigquery/sql_store.go index 69733e4627e..e5b507e3ab1 100644 --- a/runtime/drivers/bigquery/sql_store.go +++ b/runtime/drivers/bigquery/sql_store.go @@ -119,6 +119,10 @@ func (f *fileIterator) HasNext() bool { func (f *fileIterator) KeepFilesUntilClose(keepFilesUntilClose bool) { } +func (f *fileIterator) NextBatchSize(sizeInBytes int64) ([]string, error) { + return f.NextBatch(1) +} + // NextBatch implements drivers.FileIterator. // TODO :: currently it downloads all records in a single file. Need to check if it is efficient to ingest a single file with size in tens of GBs or more. func (f *fileIterator) NextBatch(limit int) ([]string, error) { diff --git a/runtime/drivers/blob/blobdownloader.go b/runtime/drivers/blob/blobdownloader.go index 07e667ea8e7..5ac24bbafa0 100644 --- a/runtime/drivers/blob/blobdownloader.go +++ b/runtime/drivers/blob/blobdownloader.go @@ -18,10 +18,9 @@ import ( "golang.org/x/sync/errgroup" ) -// increasing this limit can increase speed ingestion -// but may increase bottleneck at duckdb or network/db IO -// set without any benchamarks -const _concurrentBlobDownloadLimit = 32 +// Number of concurrent file downloads. +// 23-11-13: Experimented with increasing the value to 16. It caused network saturation errors on macOS. +const _concurrentBlobDownloadLimit = 8 // map of supoprted extensions for partial downloads vs readers // zipped csv files can't be partialled downloaded @@ -152,6 +151,104 @@ func (it *blobIterator) HasNext() bool { return it.index < len(it.objects) } +// NextBatchSize downloads next n files and copies to local directory +func (it *blobIterator) NextBatchSize(sizeInBytes int64) ([]string, error) { + if !it.HasNext() { + return nil, io.EOF + } + if len(it.nextPaths) != 0 { + paths := it.nextPaths + it.index += len(paths) + it.nextPaths = nil + return paths, nil + } + + if !it.opts.KeepFilesUntilClose { + // delete files created in last iteration + fileutil.ForceRemoveFiles(it.localFiles) + } + + // new slice creation is not necessary on every iteration + // but there may be cases where n in first batch is different from n in next batch + // to keep things easy creating a new slice every time + it.localFiles = make([]string, 0) + g, grpCtx := errgroup.WithContext(it.ctx) + g.SetLimit(_concurrentBlobDownloadLimit) + + var totalSizeInBytes int64 + start := it.index + for ; it.index < len(it.objects) && totalSizeInBytes < sizeInBytes; it.index++ { + obj := it.objects[it.index] + totalSizeInBytes += obj.obj.Size + g.Go(func() error { + // need to create file by maintaining same dir path as in glob for hivepartition support + filename := filepath.Join(it.tempDir, obj.obj.Key) + if err := os.MkdirAll(filepath.Dir(filename), os.ModePerm); err != nil { + return err + } + + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, os.ModePerm) + if err != nil { + return err + } + defer file.Close() + + it.localFiles = append(it.localFiles, file.Name()) + ext := filepath.Ext(obj.obj.Key) + partialReader, isPartialDownloadSupported := _partialDownloadReaders[ext] + downloadFull := obj.full || !isPartialDownloadSupported + + // Collect metrics of download size and time + startTime := time.Now() + defer func() { + size := obj.obj.Size + st, err := file.Stat() + if err == nil { + size = st.Size() + } + 
+ duration := time.Since(startTime) + it.logger.Info("download complete", zap.String("object", obj.obj.Key), zap.Duration("duration", duration), observability.ZapCtx(it.ctx)) + drivers.RecordDownloadMetrics(grpCtx, &drivers.DownloadMetrics{ + Connector: "blob", + Ext: ext, + Partial: !downloadFull, + Duration: duration, + Size: size, + }) + }() + + // download full file + if downloadFull { + return downloadObject(grpCtx, it.bucket, obj.obj.Key, file) + } + // download partial file + // check if, for smaller size we can download entire file + switch partialReader { + case "parquet": + return downloadParquet(grpCtx, it.bucket, obj.obj, obj.extractOption, file) + case "csv": + return downloadText(grpCtx, it.bucket, obj.obj, &textExtractOption{extractOption: obj.extractOption, hasCSVHeader: true}, file) + case "json": + return downloadText(grpCtx, it.bucket, obj.obj, &textExtractOption{extractOption: obj.extractOption, hasCSVHeader: false}, file) + default: + // should not reach here + panic(fmt.Errorf("partial download not supported for extension %q", ext)) + } + }) + } + + if err := g.Wait(); err != nil { + return nil, err + } + + // clients can make changes to slice if passing the same slice that iterator holds + // creating a copy since we want to delete all these files on next batch/close + result := make([]string, it.index-start) + copy(result, it.localFiles) + return result, nil +} + // NextBatch downloads next n files and copies to local directory func (it *blobIterator) NextBatch(n int) ([]string, error) { if !it.HasNext() { diff --git a/runtime/drivers/drivers.go b/runtime/drivers/drivers.go index 68b8c1d386c..150b7f5337e 100644 --- a/runtime/drivers/drivers.go +++ b/runtime/drivers/drivers.go @@ -5,11 +5,15 @@ import ( "errors" "fmt" + "github.com/c2h5oh/datasize" "github.com/rilldata/rill/runtime/pkg/activity" "go.uber.org/zap" ) -const _iteratorBatch = 32 +const ( + _iteratorBatch = 32 + _iteratorBatchSizeInBytes = int64(5 * datasize.GB) +) var ErrIngestionLimitExceeded = fmt.Errorf("connectors: source ingestion exceeds limit") diff --git a/runtime/drivers/druid/druid.go b/runtime/drivers/druid/druid.go index 1c9a5cc9720..35970d38aef 100644 --- a/runtime/drivers/druid/druid.go +++ b/runtime/drivers/druid/druid.go @@ -130,3 +130,7 @@ func (c *connection) AsSQLStore() (drivers.SQLStore, bool) { func (c *connection) EstimateSize() (int64, bool) { return 0, false } + +func (c *connection) AcquireLongRunning(ctx context.Context) (func(), error) { + return func() {}, nil +} diff --git a/runtime/drivers/druid/olap.go b/runtime/drivers/druid/olap.go index 79fb2605015..5fe38f18374 100644 --- a/runtime/drivers/druid/olap.go +++ b/runtime/drivers/druid/olap.go @@ -13,7 +13,7 @@ func (c *connection) Dialect() drivers.Dialect { return drivers.DialectDruid } -func (c *connection) WithConnection(ctx context.Context, priority int, fn drivers.WithConnectionFunc) error { +func (c *connection) WithConnection(ctx context.Context, priority int, longRunning, tx bool, fn drivers.WithConnectionFunc) error { panic("not implemented") } diff --git a/runtime/drivers/duckdb/config.go b/runtime/drivers/duckdb/config.go index 9760be34f4f..eb207442f2d 100644 --- a/runtime/drivers/duckdb/config.go +++ b/runtime/drivers/duckdb/config.go @@ -14,6 +14,8 @@ type config struct { DSN string `mapstructure:"dsn"` // PoolSize is the number of concurrent connections and queries allowed PoolSize int `mapstructure:"pool_size"` + // AllowHostAccess denotes whether to limit access to the local environment and file system + 
AllowHostAccess bool `mapstructure:"allow_host_access"` // DBFilePath is the path where the database is stored. It is inferred from the DSN (can't be provided by user). DBFilePath string `mapstructure:"-"` } diff --git a/runtime/drivers/duckdb/duckdb.go b/runtime/drivers/duckdb/duckdb.go index 1f4158f2efc..0b9e104718d 100644 --- a/runtime/drivers/duckdb/duckdb.go +++ b/runtime/drivers/duckdb/duckdb.go @@ -73,17 +73,18 @@ func (d Driver) Open(cfgMap map[string]any, shared bool, ac activity.Client, log ctx, cancel := context.WithCancel(context.Background()) c := &connection{ - config: cfg, - logger: logger, - activity: ac, - metaSem: semaphore.NewWeighted(1), - olapSem: priorityqueue.NewSemaphore(olapSemSize), - dbCond: sync.NewCond(&sync.Mutex{}), - driverConfig: cfgMap, - driverName: d.name, - shared: shared, - ctx: ctx, - cancel: cancel, + config: cfg, + logger: logger, + activity: ac, + metaSem: semaphore.NewWeighted(1), + olapSem: priorityqueue.NewSemaphore(olapSemSize), + longRunningSem: semaphore.NewWeighted(1), // Currently hard-coded to 1 + dbCond: sync.NewCond(&sync.Mutex{}), + driverConfig: cfgMap, + driverName: d.name, + shared: shared, + ctx: ctx, + cancel: cancel, } // Open the DB @@ -151,6 +152,12 @@ type connection struct { // This creates contention for the same connection in database/sql's pool, but its locks will handle that. metaSem *semaphore.Weighted olapSem *priorityqueue.Semaphore + // The OLAP interface additionally provides an option to limit the number of long-running queries, as designated by the caller. + // longRunningSem enforces this limitation. + longRunningSem *semaphore.Weighted + // The OLAP interface also provides an option to acquire a connection "transactionally". + // We've run into issues with DuckDB freezing up on transactions, so we just use a lock for now to serialize them (inconsistency in case of crashes is acceptable). + txMu sync.RWMutex // If DuckDB encounters a fatal error, all queries will fail until the DB has been reopened. // When dbReopen is set to true, dbCond will be used to stop acquisition of new connections, // and then when dbConnCount becomes 0, the DB will be reopened and dbReopen set to false again. @@ -272,6 +279,12 @@ func (c *connection) reopenDB() error { "SET timezone='UTC'", } + // We want to set preserve_insertion_order=false in hosted environments only (where source data is never viewed directly). Setting it reduces batch data ingestion time by ~40%. + // Hack: Using AllowHostAccess as a proxy indicator for a hosted environment. + if !c.config.AllowHostAccess { + bootQueries = append(bootQueries, "SET preserve_insertion_order TO false") + } + // DuckDB extensions need to be loaded separately on each connection, but the built-in connection pool in database/sql doesn't enable that. // So we use go-duckdb's custom connector to pass a callback that it invokes for each new connection. connector, err := duckdb.NewConnector(c.config.DSN, func(execer driver.ExecerContext) error { @@ -340,30 +353,64 @@ func (c *connection) acquireMetaConn(ctx context.Context) (*sqlx.Conn, func() er // acquireOLAPConn gets a connection from the pool for OLAP queries (i.e. slow queries). // It returns a function that puts the connection back in the pool (if applicable). 
-func (c *connection) acquireOLAPConn(ctx context.Context, priority int) (*sqlx.Conn, func() error, error) { +func (c *connection) acquireOLAPConn(ctx context.Context, priority int, longRunning, tx bool) (*sqlx.Conn, func() error, error) { // Try to get conn from context (means the call is wrapped in WithConnection) conn := connFromContext(ctx) if conn != nil { return conn, func() error { return nil }, nil } + // Acquire long-running semaphore if applicable + if longRunning { + err := c.longRunningSem.Acquire(ctx, 1) + if err != nil { + return nil, nil, err + } + } + // Acquire semaphore err := c.olapSem.Acquire(ctx, priority) if err != nil { + if longRunning { + c.longRunningSem.Release(1) + } return nil, nil, err } + // Poor man's transaction support – see struct docstring for details + if tx { + c.txMu.Lock() + } else { + c.txMu.RLock() + } + // Get new conn conn, releaseConn, err := c.acquireConn(ctx) if err != nil { + if tx { + c.txMu.Unlock() + } else { + c.txMu.RUnlock() + } c.olapSem.Release() + if longRunning { + c.longRunningSem.Release(1) + } return nil, nil, err } // Build release func release := func() error { err := releaseConn() + if tx { + c.txMu.Unlock() + } else { + c.txMu.RUnlock() + } c.olapSem.Release() + if longRunning { + c.longRunningSem.Release(1) + } return err } diff --git a/runtime/drivers/duckdb/duckdb_test.go b/runtime/drivers/duckdb/duckdb_test.go index 8828bc302cf..322329d980f 100644 --- a/runtime/drivers/duckdb/duckdb_test.go +++ b/runtime/drivers/duckdb/duckdb_test.go @@ -163,7 +163,7 @@ func TestFatalErrConcurrent(t *testing.T) { LEFT JOIN d ON b.b12 = d.d1 WHERE d.d2 IN (''); ` - err1 = olap.WithConnection(context.Background(), 0, func(ctx, ensuredCtx context.Context, _ *sql.Conn) error { + err1 = olap.WithConnection(context.Background(), 0, false, false, func(ctx, ensuredCtx context.Context, _ *sql.Conn) error { time.Sleep(500 * time.Millisecond) return olap.Exec(ctx, &drivers.Statement{Query: qry}) }) @@ -176,7 +176,7 @@ func TestFatalErrConcurrent(t *testing.T) { var err2 error go func() { qry := `SELECT * FROM a;` - err2 = olap.WithConnection(context.Background(), 0, func(ctx, ensuredCtx context.Context, _ *sql.Conn) error { + err2 = olap.WithConnection(context.Background(), 0, false, false, func(ctx, ensuredCtx context.Context, _ *sql.Conn) error { time.Sleep(1000 * time.Millisecond) return olap.Exec(ctx, &drivers.Statement{Query: qry}) }) @@ -190,7 +190,7 @@ func TestFatalErrConcurrent(t *testing.T) { go func() { time.Sleep(250 * time.Millisecond) qry := `SELECT * FROM a;` - err3 = olap.WithConnection(context.Background(), 0, func(ctx, ensuredCtx context.Context, _ *sql.Conn) error { + err3 = olap.WithConnection(context.Background(), 0, false, false, func(ctx, ensuredCtx context.Context, _ *sql.Conn) error { return olap.Exec(ctx, &drivers.Statement{Query: qry}) }) wg.Done() diff --git a/runtime/drivers/duckdb/olap.go b/runtime/drivers/duckdb/olap.go index d07a03726cb..5ea3ae40dcf 100644 --- a/runtime/drivers/duckdb/olap.go +++ b/runtime/drivers/duckdb/olap.go @@ -30,14 +30,14 @@ func (c *connection) Dialect() drivers.Dialect { return drivers.DialectDuckDB } -func (c *connection) WithConnection(ctx context.Context, priority int, fn drivers.WithConnectionFunc) error { +func (c *connection) WithConnection(ctx context.Context, priority int, longRunning, tx bool, fn drivers.WithConnectionFunc) error { // Check not nested if connFromContext(ctx) != nil { panic("nested WithConnection") } // Acquire connection - conn, release, err := c.acquireOLAPConn(ctx, 
priority) + conn, release, err := c.acquireOLAPConn(ctx, priority, longRunning, tx) if err != nil { return err } @@ -116,7 +116,7 @@ func (c *connection) Execute(ctx context.Context, stmt *drivers.Statement) (res }() // Acquire connection - conn, release, err := c.acquireOLAPConn(ctx, stmt.Priority) + conn, release, err := c.acquireOLAPConn(ctx, stmt.Priority, stmt.LongRunning, false) acquiredTime = time.Now() if err != nil { return nil, err diff --git a/runtime/drivers/duckdb/transporter/duckDB_to_duckDB.go b/runtime/drivers/duckdb/transporter/duckDB_to_duckDB.go index 4a0c4386064..39e9a59a87a 100644 --- a/runtime/drivers/duckdb/transporter/duckDB_to_duckDB.go +++ b/runtime/drivers/duckdb/transporter/duckDB_to_duckDB.go @@ -34,5 +34,5 @@ func (t *duckDBToDuckDB) Transfer(ctx context.Context, srcProps, sinkProps map[s } qry := fmt.Sprintf("CREATE OR REPLACE TABLE %s AS (%s)", safeName(sinkCfg.Table), srcCfg.SQL) - return t.to.Exec(ctx, &drivers.Statement{Query: qry, Priority: 1}) + return t.to.Exec(ctx, &drivers.Statement{Query: qry, Priority: 1, LongRunning: true}) } diff --git a/runtime/drivers/duckdb/transporter/filestore_to_duckDB.go b/runtime/drivers/duckdb/transporter/filestore_to_duckDB.go index fce9a54b237..5a2e0437d2a 100644 --- a/runtime/drivers/duckdb/transporter/filestore_to_duckDB.go +++ b/runtime/drivers/duckdb/transporter/filestore_to_duckDB.go @@ -67,7 +67,7 @@ func (t *fileStoreToDuckDB) Transfer(ctx context.Context, srcProps, sinkProps ma } qry := fmt.Sprintf("CREATE OR REPLACE TABLE %s AS (SELECT * FROM %s)", safeName(sinkCfg.Table), from) - err = t.to.Exec(ctx, &drivers.Statement{Query: qry, Priority: 1}) + err = t.to.Exec(ctx, &drivers.Statement{Query: qry, Priority: 1, LongRunning: true}) if err != nil { return err } diff --git a/runtime/drivers/duckdb/transporter/motherduck_to_duckDB.go b/runtime/drivers/duckdb/transporter/motherduck_to_duckDB.go index 1e6961d7aa2..c3dc282fdbd 100644 --- a/runtime/drivers/duckdb/transporter/motherduck_to_duckDB.go +++ b/runtime/drivers/duckdb/transporter/motherduck_to_duckDB.go @@ -40,7 +40,7 @@ func (t *motherduckToDuckDB) Transfer(ctx context.Context, srcProps, sinkProps m } config := t.from.Config() - err = t.to.WithConnection(ctx, 1, func(ctx, ensuredCtx context.Context, _ *sql.Conn) error { + err = t.to.WithConnection(ctx, 1, true, false, func(ctx, ensuredCtx context.Context, _ *sql.Conn) error { res, err := t.to.Execute(ctx, &drivers.Statement{Query: "SELECT current_database();"}) if err != nil { return err diff --git a/runtime/drivers/duckdb/transporter/objectStore_to_duckDB.go b/runtime/drivers/duckdb/transporter/objectStore_to_duckDB.go index c94c50ecd1d..d55949c0133 100644 --- a/runtime/drivers/duckdb/transporter/objectStore_to_duckDB.go +++ b/runtime/drivers/duckdb/transporter/objectStore_to_duckDB.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/c2h5oh/datasize" "github.com/rilldata/rill/runtime/drivers" "github.com/rilldata/rill/runtime/pkg/duckdbsql" "github.com/rilldata/rill/runtime/pkg/fileutil" @@ -79,8 +80,16 @@ func (t *objectStoreToDuckDB) Transfer(ctx context.Context, srcProps, sinkProps a := newAppender(t.to, sinkCfg, ingestionProps, allowSchemaRelaxation, t.logger) + batchSize := opts.IteratorBatchSizeInBytes + if val, ok := srcProps["batch_size"].(string); ok { + b, err := datasize.ParseString(val) + if err != nil { + return err + } + batchSize = int64(b.Bytes()) + } for iterator.HasNext() { - files, err := iterator.NextBatch(opts.IteratorBatch) + files, err := iterator.NextBatchSize(batchSize) if 
err != nil { return err } @@ -103,7 +112,7 @@ func (t *objectStoreToDuckDB) Transfer(ctx context.Context, srcProps, sinkProps } query := fmt.Sprintf("CREATE OR REPLACE TABLE %s AS (SELECT * FROM %s);", safeName(sinkCfg.Table), from) - if err := t.to.Exec(ctx, &drivers.Statement{Query: query, Priority: 1}); err != nil { + if err := t.to.Exec(ctx, &drivers.Statement{Query: query, Priority: 1, LongRunning: true}); err != nil { return err } } @@ -151,7 +160,7 @@ func (a *appender) appendData(ctx context.Context, files []string, format string query = fmt.Sprintf("INSERT INTO %s (SELECT * FROM %s);", safeName(a.sink.Table), from) } a.logger.Debug("generated query", zap.String("query", query), observability.ZapCtx(ctx)) - err = a.to.Exec(ctx, &drivers.Statement{Query: query, Priority: 1}) + err = a.to.Exec(ctx, &drivers.Statement{Query: query, Priority: 1, LongRunning: true}) if err == nil || !a.allowSchemaRelaxation || !containsAny(err.Error(), []string{"binder error", "conversion error"}) { return err } @@ -165,7 +174,7 @@ func (a *appender) appendData(ctx context.Context, files []string, format string query = fmt.Sprintf("INSERT INTO %s BY NAME (SELECT * FROM %s);", safeName(a.sink.Table), from) a.logger.Debug("generated query", zap.String("query", query), observability.ZapCtx(ctx)) - return a.to.Exec(ctx, &drivers.Statement{Query: query, Priority: 1}) + return a.to.Exec(ctx, &drivers.Statement{Query: query, Priority: 1, LongRunning: true}) } // updateSchema updates the schema of the table in case new file adds a new column or @@ -226,7 +235,7 @@ func (a *appender) updateSchema(ctx context.Context, from string, fileNames []st for colName, colType := range newCols { a.tableSchema[colName] = colType qry := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s", safeName(a.sink.Table), safeName(colName), colType) - if err := a.to.Exec(ctx, &drivers.Statement{Query: qry}); err != nil { + if err := a.to.Exec(ctx, &drivers.Statement{Query: qry, LongRunning: true}); err != nil { return err } } @@ -234,7 +243,7 @@ func (a *appender) updateSchema(ctx context.Context, from string, fileNames []st for colName, colType := range colTypeChanged { a.tableSchema[colName] = colType qry := fmt.Sprintf("ALTER TABLE %s ALTER COLUMN %s SET DATA TYPE %s", safeName(a.sink.Table), safeName(colName), colType) - if err := a.to.Exec(ctx, &drivers.Statement{Query: qry}); err != nil { + if err := a.to.Exec(ctx, &drivers.Statement{Query: qry, LongRunning: true}); err != nil { return err } } @@ -243,7 +252,7 @@ func (a *appender) updateSchema(ctx context.Context, from string, fileNames []st } func (a *appender) scanSchemaFromQuery(ctx context.Context, qry string) (map[string]string, error) { - result, err := a.to.Execute(ctx, &drivers.Statement{Query: qry, Priority: 1}) + result, err := a.to.Execute(ctx, &drivers.Statement{Query: qry, Priority: 1, LongRunning: true}) if err != nil { return nil, err } @@ -320,7 +329,7 @@ func (t *objectStoreToDuckDB) ingestDuckDBSQL( st := time.Now() query := fmt.Sprintf("CREATE OR REPLACE TABLE %s AS (%s);", dbSink.Table, sql) - err = t.to.Exec(ctx, &drivers.Statement{Query: query, Priority: 1}) + err = t.to.Exec(ctx, &drivers.Statement{Query: query, Priority: 1, LongRunning: true}) if err != nil { return err } diff --git a/runtime/drivers/duckdb/transporter/sqlstore_to_duckDB.go b/runtime/drivers/duckdb/transporter/sqlstore_to_duckDB.go index 8e0780f98a9..610442c4741 100644 --- a/runtime/drivers/duckdb/transporter/sqlstore_to_duckDB.go +++ b/runtime/drivers/duckdb/transporter/sqlstore_to_duckDB.go 
@@ -71,7 +71,7 @@ func (s *sqlStoreToDuckDB) Transfer(ctx context.Context, srcProps, sinkProps map query = fmt.Sprintf("INSERT INTO %s (SELECT * FROM %s);", safeName(sinkCfg.Table), from) } - if err := s.to.Exec(ctx, &drivers.Statement{Query: query, Priority: 1}); err != nil { + if err := s.to.Exec(ctx, &drivers.Statement{Query: query, Priority: 1, LongRunning: true}); err != nil { return err } } diff --git a/runtime/drivers/duckdb/transporter/transporter_test.go b/runtime/drivers/duckdb/transporter/transporter_test.go index ee8b43887bd..e36e49a70a4 100644 --- a/runtime/drivers/duckdb/transporter/transporter_test.go +++ b/runtime/drivers/duckdb/transporter/transporter_test.go @@ -37,6 +37,11 @@ func (m *mockIterator) NextBatch(limit int) ([]string, error) { return m.batches[m.index-1], nil } +func (m *mockIterator) NextBatchSize(sizeInBytes int64) ([]string, error) { + m.index += 1 + return m.batches[m.index-1], nil +} + func (m *mockIterator) HasNext() bool { return m.index < len(m.batches) } diff --git a/runtime/drivers/object_store.go b/runtime/drivers/object_store.go index 0d298fb4e75..48714fb8573 100644 --- a/runtime/drivers/object_store.go +++ b/runtime/drivers/object_store.go @@ -15,6 +15,10 @@ type FileIterator interface { // NextBatch returns a list of file downloaded from external sources // and cleanups file created in previous batch NextBatch(limit int) ([]string, error) + // NextBatchSize returns a list of files downloaded from external sources + // such that the combined size of all files is less than or equal to sizeInBytes, + // and cleans up files created in the previous batch + NextBatchSize(sizeInBytes int64) ([]string, error) // HasNext can be utlisied to check if iterator has more elements left HasNext() bool // Size returns size of data downloaded in unit. diff --git a/runtime/drivers/olap.go b/runtime/drivers/olap.go index 7f72b84f6bb..de23c8b8d2b 100644 --- a/runtime/drivers/olap.go +++ b/runtime/drivers/olap.go @@ -22,7 +22,7 @@ type WithConnectionFunc func(wrappedCtx context.Context, ensuredCtx context.Cont // OLAPStore is implemented by drivers that are capable of storing, transforming and serving analytical queries.
type OLAPStore interface { Dialect() Dialect - WithConnection(ctx context.Context, priority int, fn WithConnectionFunc) error + WithConnection(ctx context.Context, priority int, longRunning, tx bool, fn WithConnectionFunc) error Exec(ctx context.Context, stmt *Statement) error Execute(ctx context.Context, stmt *Statement) (*Result, error) InformationSchema() InformationSchema @@ -35,6 +35,7 @@ type Statement struct { Args []any DryRun bool Priority int + LongRunning bool ExecutionTimeout time.Duration } diff --git a/runtime/drivers/transporter.go b/runtime/drivers/transporter.go index 8863da010b2..f76e2b22fff 100644 --- a/runtime/drivers/transporter.go +++ b/runtime/drivers/transporter.go @@ -12,14 +12,16 @@ type Transporter interface { } type TransferOpts struct { - IteratorBatch int - LimitInBytes int64 + IteratorBatch int + IteratorBatchSizeInBytes int64 + LimitInBytes int64 } func NewTransferOpts(opts ...TransferOption) *TransferOpts { t := &TransferOpts{ - IteratorBatch: _iteratorBatch, - LimitInBytes: math.MaxInt64, + IteratorBatch: _iteratorBatch, + LimitInBytes: math.MaxInt64, + IteratorBatchSizeInBytes: _iteratorBatchSizeInBytes, } for _, opt := range opts { @@ -36,6 +38,12 @@ func WithIteratorBatch(b int) TransferOption { } } +func WithIteratorBatchSizeInBytes(b int64) TransferOption { + return func(t *TransferOpts) { + t.IteratorBatchSizeInBytes = b + } +} + func WithLimitInBytes(limit int64) TransferOption { return func(t *TransferOpts) { t.LimitInBytes = limit diff --git a/runtime/queries/column_timeseries.go b/runtime/queries/column_timeseries.go index 34165468183..6f01a07d105 100644 --- a/runtime/queries/column_timeseries.go +++ b/runtime/queries/column_timeseries.go @@ -99,7 +99,7 @@ func (q *ColumnTimeseries) Resolve(ctx context.Context, rt *runtime.Runtime, ins return nil } - return olap.WithConnection(ctx, priority, func(ctx context.Context, ensuredCtx context.Context, _ *sql.Conn) error { + return olap.WithConnection(ctx, priority, false, false, func(ctx context.Context, ensuredCtx context.Context, _ *sql.Conn) error { filter, args, err := buildFilterClauseForMetricsViewFilter(q.MetricsView, q.MetricsViewFilter, olap.Dialect(), q.MetricsViewPolicy) if err != nil { return err diff --git a/runtime/queries/table_columns.go b/runtime/queries/table_columns.go index 73783fbc336..1b6dd175215 100644 --- a/runtime/queries/table_columns.go +++ b/runtime/queries/table_columns.go @@ -58,7 +58,7 @@ func (q *TableColumns) Resolve(ctx context.Context, rt *runtime.Runtime, instanc return fmt.Errorf("not available for dialect '%s'", olap.Dialect()) } - return olap.WithConnection(ctx, priority, func(ctx context.Context, ensuredCtx context.Context, _ *sql.Conn) error { + return olap.WithConnection(ctx, priority, false, false, func(ctx context.Context, ensuredCtx context.Context, _ *sql.Conn) error { // views return duplicate column names, so we need to create a temporary table temporaryTableName := tempName("profile_columns_") err = olap.Exec(ctx, &drivers.Statement{ diff --git a/runtime/reconcilers/README.md b/runtime/reconcilers/README.md index 1962e75305c..20cbab9b0e1 100644 --- a/runtime/reconcilers/README.md +++ b/runtime/reconcilers/README.md @@ -37,4 +37,4 @@ - Calls to `Reconcile` can run for a long time (as long as they respond quickly to a cancelled `ctx`). - `Reconcile` should strive to keep a resource's `.State` correct at all times because it may be accessed while `Reconcile` is running to resolve API requests (such as dashboard queries). 
- The `Reconciler` struct is shared for all resources of the registered kind for a given instance ID. This enables it to cache (ephemeral) state in-between invocations for optimization. - +- The resource's meta and spec (but not state) may be updated concurrently. Calls to `Get` return a clone of the resource, but if the reconciler updates the resource's meta or spec, it must use a lock to read and update it. diff --git a/runtime/reconcilers/metrics_view.go b/runtime/reconcilers/metrics_view.go index db6c0b46464..e5107b8508f 100644 --- a/runtime/reconcilers/metrics_view.go +++ b/runtime/reconcilers/metrics_view.go @@ -47,12 +47,23 @@ func (r *MetricsViewReconciler) AssignState(from, to *runtimev1.Resource) error return nil } +func (r *MetricsViewReconciler) ResetState(res *runtimev1.Resource) error { + res.GetMetricsView().State = &runtimev1.MetricsViewState{} + return nil +} + func (r *MetricsViewReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceName) runtime.ReconcileResult { - self, err := r.C.Get(ctx, n) + self, err := r.C.Get(ctx, n, true) if err != nil { return runtime.ReconcileResult{Err: err} } mv := self.GetMetricsView() + if mv == nil { + return runtime.ReconcileResult{Err: errors.New("not a metrics view")} + } + + // NOTE: Not checking refs here since refs may still be valid even if they have errors (in case of staged changes). + // Instead, we just validate against the table name. validateErr := r.validate(ctx, mv.Spec) diff --git a/runtime/reconcilers/migration.go b/runtime/reconcilers/migration.go index ec5610e6ff6..fd48d541049 100644 --- a/runtime/reconcilers/migration.go +++ b/runtime/reconcilers/migration.go @@ -2,6 +2,7 @@ package reconcilers import ( "context" + "errors" "fmt" runtimev1 "github.com/rilldata/rill/proto/gen/rill/runtime/v1" @@ -46,12 +47,26 @@ func (r *MigrationReconciler) AssignState(from, to *runtimev1.Resource) error { return nil } +func (r *MigrationReconciler) ResetState(res *runtimev1.Resource) error { + res.GetMigration().State = &runtimev1.MigrationState{} + return nil +} + func (r *MigrationReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceName) runtime.ReconcileResult { - self, err := r.C.Get(ctx, n) + self, err := r.C.Get(ctx, n, true) if err != nil { return runtime.ReconcileResult{Err: err} } mig := self.GetMigration() + if mig == nil { + return runtime.ReconcileResult{Err: errors.New("not a migration")} + } + + // Check refs - stop if any of them are invalid + err = checkRefs(ctx, r.C, self.Meta.Refs) + if err != nil { + return runtime.ReconcileResult{Err: err} + } from := mig.State.Version to := mig.Spec.Version @@ -106,7 +121,7 @@ func (r *MigrationReconciler) executeMigration(ctx context.Context, self *runtim if name.Kind == compilerv1.ResourceKindUnspecified { return compilerv1.TemplateResource{}, fmt.Errorf("can't resolve name %q without kind specified", name.Name) } - res, err := r.C.Get(ctx, resourceNameFromCompiler(name)) + res, err := r.C.Get(ctx, resourceNameFromCompiler(name), false) if err != nil { return compilerv1.TemplateResource{}, err } @@ -128,7 +143,8 @@ func (r *MigrationReconciler) executeMigration(ctx context.Context, self *runtim defer release() return olap.Exec(ctx, &drivers.Statement{ - Query: sql, - Priority: 100, + Query: sql, + Priority: 100, + LongRunning: true, }) } diff --git a/runtime/reconcilers/model.go b/runtime/reconcilers/model.go index 56bc0e2efb0..07c1e03fb2c 100644 --- a/runtime/reconcilers/model.go +++ b/runtime/reconcilers/model.go @@ -5,6 +5,7 @@ import ( "crypto/md5"
"encoding/binary" "encoding/hex" + "errors" "fmt" "time" @@ -12,6 +13,7 @@ import ( "github.com/rilldata/rill/runtime" compilerv1 "github.com/rilldata/rill/runtime/compilers/rillv1" "github.com/rilldata/rill/runtime/drivers" + "golang.org/x/exp/slog" "google.golang.org/protobuf/types/known/timestamppb" ) @@ -53,12 +55,20 @@ func (r *ModelReconciler) AssignState(from, to *runtimev1.Resource) error { return nil } +func (r *ModelReconciler) ResetState(res *runtimev1.Resource) error { + res.GetModel().State = &runtimev1.ModelState{} + return nil +} + func (r *ModelReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceName) runtime.ReconcileResult { - self, err := r.C.Get(ctx, n) + self, err := r.C.Get(ctx, n, true) if err != nil { return runtime.ReconcileResult{Err: err} } model := self.GetModel() + if model == nil { + return runtime.ReconcileResult{Err: errors.New("not a model")} + } // The view/table name is derived from the resource name. // We only set src.State.Table after it has been created, @@ -78,13 +88,8 @@ func (r *ModelReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceNa // Handle renames if self.Meta.RenamedFrom != nil { if t, ok := olapTableInfo(ctx, r.C, model.State.Connector, model.State.Table); ok { - // Clear any existing table with the new name - if t2, ok := olapTableInfo(ctx, r.C, model.State.Connector, tableName); ok { - olapDropTableIfExists(ctx, r.C, model.State.Connector, t2.Name, t2.View) - } - // Rename and update state - err = olapRenameTable(ctx, r.C, model.State.Connector, model.State.Table, tableName, t.View) + err = olapForceRenameTable(ctx, r.C, model.State.Connector, model.State.Table, t.View, tableName) if err != nil { return runtime.ReconcileResult{Err: fmt.Errorf("failed to rename model: %w", err)} } @@ -97,12 +102,28 @@ func (r *ModelReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceNa // Note: Not exiting early. It might need to be created/materialized., and we need to set the correct retrigger time based on the refresh schedule. 
} - // TODO: Exit if refs have errors - - // TODO: Incorporate changes to refs in hash – track if refs have changed (deleted, added, or state updated) + // Check refs - stop if any of them are invalid + err = checkRefs(ctx, r.C, self.Meta.Refs) + if err != nil { + if !model.Spec.StageChanges && model.State.Table != "" { + // Remove previously ingested table + if t, ok := olapTableInfo(ctx, r.C, model.State.Connector, model.State.Table); ok { + olapDropTableIfExists(ctx, r.C, model.State.Connector, model.State.Table, t.View) + } + model.State.Connector = "" + model.State.Table = "" + model.State.SpecHash = "" + model.State.RefreshedOn = nil + err = r.C.UpdateState(ctx, self.Meta.Name, self) + if err != nil { + r.C.Logger.Error("refs check: failed to update state", slog.Any("err", err)) + } + } + return runtime.ReconcileResult{Err: err} + } // Use a hash of execution-related fields from the spec to determine if something has changed - hash, err := r.executionSpecHash(model.Spec) + hash, err := r.executionSpecHash(ctx, self.Meta.Refs, model.Spec) if err != nil { return runtime.ReconcileResult{Err: fmt.Errorf("failed to compute hash: %w", err)} } @@ -128,6 +149,7 @@ func (r *ModelReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceNa // Decide if we should trigger an update trigger := model.Spec.Trigger trigger = trigger || model.State.Table == "" + trigger = trigger || model.State.Table != tableName trigger = trigger || model.State.RefreshedOn == nil trigger = trigger || model.State.SpecHash != hash trigger = trigger || !exists @@ -191,12 +213,8 @@ func (r *ModelReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceNa createErr = fmt.Errorf("failed to create model: %w", createErr) } if createErr == nil && stage { - // Drop the main view/table - if t, ok := olapTableInfo(ctx, r.C, connector, tableName); ok { - olapDropTableIfExists(ctx, r.C, connector, t.Name, t.View) - } // Rename the staging table to main view/table - err = olapRenameTable(ctx, r.C, connector, stagingTableName, tableName, !materialize) + err = olapForceRenameTable(ctx, r.C, connector, stagingTableName, !materialize, tableName) if err != nil { return runtime.ReconcileResult{Err: fmt.Errorf("failed to rename staged model: %w", err)} } @@ -251,8 +269,7 @@ func (r *ModelReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceNa // Reset spec.Trigger if model.Spec.Trigger { - model.Spec.Trigger = false - err = r.C.UpdateSpec(ctx, self.Meta.Name, self) + err := r.setTriggerFalse(ctx, n) if err != nil { return runtime.ReconcileResult{Err: err} } @@ -289,10 +306,35 @@ func (r *ModelReconciler) delayedMaterializeTime(spec *runtimev1.ModelSpec, sinc return since.Add(time.Duration(spec.MaterializeDelaySeconds) * time.Second), true } -// executionSpecHash computes a hash of only those model spec properties that impact execution. -func (r *ModelReconciler) executionSpecHash(spec *runtimev1.ModelSpec) (string, error) { +// executionSpecHash computes a hash of only those model properties that impact execution. 
+func (r *ModelReconciler) executionSpecHash(ctx context.Context, refs []*runtimev1.ResourceName, spec *runtimev1.ModelSpec) (string, error) { hash := md5.New() + for _, ref := range refs { // Refs are always sorted + // Write name + _, err := hash.Write([]byte(ref.Kind)) + if err != nil { + return "", err + } + _, err = hash.Write([]byte(ref.Name)) + if err != nil { + return "", err + } + + // Write state version (doesn't matter how the spec or meta has changed, only if/when state changes) + r, err := r.C.Get(ctx, ref, false) + var stateVersion int64 + if err == nil { + stateVersion = r.Meta.StateVersion + } else { + stateVersion = -1 + } + err = binary.Write(hash, binary.BigEndian, stateVersion) + if err != nil { + return "", err + } + } + _, err := hash.Write([]byte(spec.Connector)) if err != nil { return "", err @@ -321,6 +363,26 @@ func (r *ModelReconciler) executionSpecHash(spec *runtimev1.ModelSpec) (string, return hex.EncodeToString(hash.Sum(nil)), nil } +// setTriggerFalse sets the model's spec.Trigger to false. +// Unlike the State, the Spec may be edited concurrently with a Reconcile call, so we need to read and edit it under a lock. +func (r *ModelReconciler) setTriggerFalse(ctx context.Context, n *runtimev1.ResourceName) error { + r.C.Lock(ctx) + defer r.C.Unlock(ctx) + + self, err := r.C.Get(ctx, n, false) + if err != nil { + return err + } + + model := self.GetModel() + if model == nil { + return fmt.Errorf("not a model") + } + + model.Spec.Trigger = false + return r.C.UpdateSpec(ctx, self.Meta.Name, self) +} + // createModel creates or updates the model in the OLAP connector. func (r *ModelReconciler) createModel(ctx context.Context, self *runtimev1.Resource, tableName string, view bool) error { inst, err := r.C.Runtime.FindInstance(ctx, r.C.InstanceID) @@ -348,7 +410,7 @@ func (r *ModelReconciler) createModel(ctx context.Context, self *runtimev1.Resou if name.Kind == compilerv1.ResourceKindUnspecified { return compilerv1.TemplateResource{}, fmt.Errorf("can't resolve name %q without kind specified", name.Name) } - res, err := r.C.Get(ctx, resourceNameFromCompiler(name)) + res, err := r.C.Get(ctx, resourceNameFromCompiler(name), false) if err != nil { return compilerv1.TemplateResource{}, err } @@ -391,7 +453,8 @@ func (r *ModelReconciler) createModel(ctx context.Context, self *runtimev1.Resou } return olap.Exec(ctx, &drivers.Statement{ - Query: fmt.Sprintf("CREATE OR REPLACE %s %s AS (%s)", typ, safeSQLName(tableName), sql), - Priority: 100, + Query: fmt.Sprintf("CREATE OR REPLACE %s %s AS (%s)", typ, safeSQLName(tableName), sql), + Priority: 100, + LongRunning: true, }) } diff --git a/runtime/reconcilers/project_parser.go b/runtime/reconcilers/project_parser.go index 0fbf8a818f2..fdd167b9ec1 100644 --- a/runtime/reconcilers/project_parser.go +++ b/runtime/reconcilers/project_parser.go @@ -53,13 +53,21 @@ func (r *ProjectParserReconciler) AssignState(from, to *runtimev1.Resource) erro return nil } +func (r *ProjectParserReconciler) ResetState(res *runtimev1.Resource) error { + res.GetProjectParser().State = &runtimev1.ProjectParserState{} + return nil +} + func (r *ProjectParserReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceName) runtime.ReconcileResult { // Get ProjectParser resource - self, err := r.C.Get(ctx, n) + self, err := r.C.Get(ctx, n, true) if err != nil { return runtime.ReconcileResult{Err: err} } pp := self.GetProjectParser() + if pp == nil { + return runtime.ReconcileResult{Err: errors.New("not a project parser")} + } // Does not support 
renames if self.Meta.RenamedFrom != nil { @@ -71,7 +79,7 @@ func (r *ProjectParserReconciler) Reconcile(ctx context.Context, n *runtimev1.Re r.C.Lock(ctx) defer r.C.Unlock(ctx) - resources, err := r.C.List(ctx, "") + resources, err := r.C.List(ctx, "", false) if err != nil { return runtime.ReconcileResult{Err: err} } @@ -112,7 +120,7 @@ func (r *ProjectParserReconciler) Reconcile(ctx context.Context, n *runtimev1.Re } if pp.State.CurrentCommitSha != hash { pp.State.CurrentCommitSha = hash - err = r.C.UpdateState(ctx, n, self) // TODO: Pointer relationship between self and pp makes this hard to follow + err = r.C.UpdateState(ctx, n, self) if err != nil { return runtime.ReconcileResult{Err: err} } @@ -179,9 +187,9 @@ func (r *ProjectParserReconciler) Reconcile(ctx context.Context, n *runtimev1.Re // reconcileParser reconciles a parser's output with the current resources in the catalog. func (r *ProjectParserReconciler) reconcileParser(ctx context.Context, self *runtimev1.Resource, parser *compilerv1.Parser, diff *compilerv1.Diff) error { - // Update state from rill.yaml - if diff == nil || diff.ModifiedRillYAML { - err := r.reconcileRillYAML(ctx, parser) + // Update state from rill.yaml and .env + if diff == nil || diff.ModifiedRillYAML || diff.ModifiedDotEnv { + err := r.reconcileProjectConfig(ctx, parser) if err != nil { return err } @@ -215,8 +223,8 @@ func (r *ProjectParserReconciler) reconcileParser(ctx context.Context, self *run return r.reconcileResources(ctx, self, parser) } -// reconcileRillYAML updates instance config derived from rill.yaml -func (r *ProjectParserReconciler) reconcileRillYAML(ctx context.Context, parser *compilerv1.Parser) error { +// reconcileProjectConfig updates instance config derived from rill.yaml and .env +func (r *ProjectParserReconciler) reconcileProjectConfig(ctx context.Context, parser *compilerv1.Parser) error { inst, err := r.C.Runtime.FindInstance(ctx, r.C.InstanceID) if err != nil { return err @@ -226,6 +234,9 @@ func (r *ProjectParserReconciler) reconcileRillYAML(ctx context.Context, parser for _, v := range parser.RillYAML.Variables { vars[v.Name] = v.Default } + for k, v := range parser.DotEnv { + vars[k] = v + } inst.ProjectVariables = vars err = r.C.Runtime.EditInstance(ctx, inst) @@ -242,7 +253,7 @@ func (r *ProjectParserReconciler) reconcileResources(ctx context.Context, self * var deleteResources []*runtimev1.Resource // Pass over all existing resources in the catalog. - resources, err := r.C.List(ctx, "") + resources, err := r.C.List(ctx, "", false) if err != nil { return err } @@ -317,7 +328,7 @@ func (r *ProjectParserReconciler) reconcileResourcesDiff(ctx context.Context, se // Gather resource to delete so we can check for renames. 
deleteResources := make([]*runtimev1.Resource, 0, len(diff.Deleted)) for _, n := range diff.Deleted { - r, err := r.C.Get(ctx, resourceNameFromCompiler(n)) + r, err := r.C.Get(ctx, resourceNameFromCompiler(n), false) if err != nil { return err } @@ -326,7 +337,7 @@ func (r *ProjectParserReconciler) reconcileResourcesDiff(ctx context.Context, se // Updates for _, n := range diff.Modified { - existing, err := r.C.Get(ctx, resourceNameFromCompiler(n)) + existing, err := r.C.Get(ctx, resourceNameFromCompiler(n), false) if err != nil { return err } @@ -434,7 +445,7 @@ func (r *ProjectParserReconciler) putParserResourceDef(ctx context.Context, self } // Update meta if refs or file paths changed - if !slices.Equal(existing.Meta.FilePaths, def.Paths) || !slices.Equal(existing.Meta.Refs, refs) { // TODO: Don't use slices.Equal for protos + if !slices.Equal(existing.Meta.FilePaths, def.Paths) || !equalResourceNames(existing.Meta.Refs, refs) { err := r.C.UpdateMeta(ctx, n, refs, self.Meta.Name, def.Paths) if err != nil { return err @@ -544,6 +555,18 @@ func equalResourceName(a, b *runtimev1.ResourceName) bool { return a.Kind == b.Kind && strings.EqualFold(a.Name, b.Name) } +func equalResourceNames(a, b []*runtimev1.ResourceName) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !equalResourceName(v, b[i]) { + return false + } + } + return true +} + func equalSourceSpec(a, b *runtimev1.SourceSpec) bool { return proto.Equal(a, b) } diff --git a/runtime/reconcilers/pull_trigger.go b/runtime/reconcilers/pull_trigger.go index d33b80c6019..98dc4f027fa 100644 --- a/runtime/reconcilers/pull_trigger.go +++ b/runtime/reconcilers/pull_trigger.go @@ -47,8 +47,13 @@ func (r *PullTriggerReconciler) AssignState(from, to *runtimev1.Resource) error return nil } +func (r *PullTriggerReconciler) ResetState(res *runtimev1.Resource) error { + res.GetPullTrigger().State = &runtimev1.PullTriggerState{} + return nil +} + func (r *PullTriggerReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceName) runtime.ReconcileResult { - self, err := r.C.Get(ctx, n) + self, err := r.C.Get(ctx, n, true) if err != nil { return runtime.ReconcileResult{Err: err} } diff --git a/runtime/reconcilers/refresh_trigger.go b/runtime/reconcilers/refresh_trigger.go index 3c3f4c20741..8b55895b895 100644 --- a/runtime/reconcilers/refresh_trigger.go +++ b/runtime/reconcilers/refresh_trigger.go @@ -2,6 +2,7 @@ package reconcilers import ( "context" + "errors" "fmt" runtimev1 "github.com/rilldata/rill/proto/gen/rill/runtime/v1" @@ -48,18 +49,29 @@ func (r *RefreshTriggerReconciler) AssignState(from, to *runtimev1.Resource) err return nil } +func (r *RefreshTriggerReconciler) ResetState(res *runtimev1.Resource) error { + res.GetRefreshTrigger().State = &runtimev1.RefreshTriggerState{} + return nil +} + func (r *RefreshTriggerReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceName) runtime.ReconcileResult { - self, err := r.C.Get(ctx, n) + self, err := r.C.Get(ctx, n, true) if err != nil { return runtime.ReconcileResult{Err: err} } trigger := self.GetRefreshTrigger() + if trigger == nil { + return runtime.ReconcileResult{Err: errors.New("not a refresh trigger")} + } if self.Meta.DeletedOn != nil { return runtime.ReconcileResult{} } - resources, err := r.C.List(ctx, "") + r.C.Lock(ctx) + defer r.C.Unlock(ctx) + + resources, err := r.C.List(ctx, "", false) if err != nil { return runtime.ReconcileResult{Err: err} } diff --git a/runtime/reconcilers/source.go b/runtime/reconcilers/source.go index 
017aff3e3fa..a9ddf88c97e 100644 --- a/runtime/reconcilers/source.go +++ b/runtime/reconcilers/source.go @@ -5,6 +5,7 @@ import ( "crypto/md5" "encoding/binary" "encoding/hex" + "errors" "fmt" "time" @@ -12,6 +13,7 @@ import ( "github.com/rilldata/rill/runtime" "github.com/rilldata/rill/runtime/drivers" "github.com/rilldata/rill/runtime/pkg/pbutil" + "golang.org/x/exp/slog" "google.golang.org/protobuf/types/known/structpb" "google.golang.org/protobuf/types/known/timestamppb" ) @@ -54,12 +56,20 @@ func (r *SourceReconciler) AssignState(from, to *runtimev1.Resource) error { return nil } +func (r *SourceReconciler) ResetState(res *runtimev1.Resource) error { + res.GetSource().State = &runtimev1.SourceState{} + return nil +} + func (r *SourceReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceName) runtime.ReconcileResult { - self, err := r.C.Get(ctx, n) + self, err := r.C.Get(ctx, n, true) if err != nil { return runtime.ReconcileResult{Err: err} } src := self.GetSource() + if src == nil { + return runtime.ReconcileResult{Err: errors.New("not a source")} + } // The table name to ingest into is derived from the resource name. // We only set src.State.Table after ingestion is complete. @@ -78,13 +88,8 @@ func (r *SourceReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceN // Check if the table exists (it should, but might somehow have been corrupted) t, ok := olapTableInfo(ctx, r.C, src.State.Connector, src.State.Table) if ok && !t.View { // Checking View only out of caution (would indicate very corrupted DB) - // Clear any existing table with the new name - if t2, ok := olapTableInfo(ctx, r.C, src.State.Connector, tableName); ok { - olapDropTableIfExists(ctx, r.C, src.State.Connector, tableName, t2.View) - } - // Rename and update state - err = olapRenameTable(ctx, r.C, src.State.Connector, src.State.Table, tableName, false) + err = olapForceRenameTable(ctx, r.C, src.State.Connector, src.State.Table, false, tableName) if err != nil { return runtime.ReconcileResult{Err: fmt.Errorf("failed to rename table: %w", err)} } @@ -97,7 +102,23 @@ func (r *SourceReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceN // Note: Not exiting early. It might need to be (re-)ingested, and we need to set the correct retrigger time based on the refresh schedule. 
} - // TODO: Exit if refs have errors + // Check refs - stop if any of them are invalid + err = checkRefs(ctx, r.C, self.Meta.Refs) + if err != nil { + if !src.Spec.StageChanges && src.State.Table != "" { + // Remove previously ingested table + olapDropTableIfExists(ctx, r.C, src.State.Connector, src.State.Table, false) + src.State.Connector = "" + src.State.Table = "" + src.State.SpecHash = "" + src.State.RefreshedOn = nil + err = r.C.UpdateState(ctx, self.Meta.Name, self) + if err != nil { + r.C.Logger.Error("refs check: failed to update state", slog.Any("err", err)) + } + } + return runtime.ReconcileResult{Err: err} + } // Use a hash of ingestion-related fields from the spec to determine if we need to re-ingest hash, err := r.ingestionSpecHash(src.Spec) @@ -158,12 +179,8 @@ func (r *SourceReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceN ingestErr = fmt.Errorf("failed to ingest source: %w", ingestErr) } if ingestErr == nil && src.Spec.StageChanges { - // Drop the main table name - if t, ok := olapTableInfo(ctx, r.C, connector, tableName); ok { - olapDropTableIfExists(ctx, r.C, connector, tableName, t.View) - } // Rename staging table to main table - err = olapRenameTable(ctx, r.C, connector, stagingTableName, tableName, false) + err = olapForceRenameTable(ctx, r.C, connector, stagingTableName, false, tableName) if err != nil { return runtime.ReconcileResult{Err: fmt.Errorf("failed to rename staging table: %w", err)} } @@ -218,8 +235,7 @@ func (r *SourceReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceN // Reset spec.Trigger if src.Spec.Trigger { - src.Spec.Trigger = false - err = r.C.UpdateSpec(ctx, self.Meta.Name, self) + err := r.setTriggerFalse(ctx, n) if err != nil { return runtime.ReconcileResult{Err: err} } @@ -267,6 +283,26 @@ func (r *SourceReconciler) stagingTableName(table string) string { return "__rill_tmp_src_" + table } +// setTriggerFalse sets the source's spec.Trigger to false. +// Unlike the State, the Spec may be edited concurrently with a Reconcile call, so we need to read and edit it under a lock. +func (r *SourceReconciler) setTriggerFalse(ctx context.Context, n *runtimev1.ResourceName) error { + r.C.Lock(ctx) + defer r.C.Unlock(ctx) + + self, err := r.C.Get(ctx, n, false) + if err != nil { + return err + } + + source := self.GetSource() + if source == nil { + return fmt.Errorf("not a source") + } + + source.Spec.Trigger = false + return r.C.UpdateSpec(ctx, self.Meta.Name, self) +} + // ingestSource ingests the source into a table with tableName. // It does NOT drop the table if ingestion fails after the table has been created. // It will return an error if the sink connector is not an OLAP. diff --git a/runtime/reconcilers/util.go b/runtime/reconcilers/util.go index 04e5b7f864a..56ef7a982f6 100644 --- a/runtime/reconcilers/util.go +++ b/runtime/reconcilers/util.go @@ -3,6 +3,7 @@ package reconcilers import ( "context" "database/sql" + "errors" "fmt" "strings" "time" @@ -13,6 +14,26 @@ import ( "github.com/robfig/cron/v3" ) +// checkRefs checks that all refs exist, are idle, and have no errors. 
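checkRefs is the shared guard that the model and source reconcilers above now call before doing any work. A rough usage sketch in a hypothetical reconciler (SomeReconciler is a stand-in; r.C is the *runtime.Controller every reconciler in this package carries, and the Get/checkRefs signatures are the ones shown in this diff):

// Sketch only, assuming the imports of the reconciler files above.
func (r *SomeReconciler) Reconcile(ctx context.Context, n *runtimev1.ResourceName) runtime.ReconcileResult {
	self, err := r.C.Get(ctx, n, true)
	if err != nil {
		return runtime.ReconcileResult{Err: err}
	}
	// Refuse to run against missing, busy, or errored upstream resources.
	if err := checkRefs(ctx, r.C, self.Meta.Refs); err != nil {
		return runtime.ReconcileResult{Err: err}
	}
	// ... hash the spec, ingest or materialize, update state, etc.
	return runtime.ReconcileResult{}
}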
+func checkRefs(ctx context.Context, c *runtime.Controller, refs []*runtimev1.ResourceName) error { + for _, ref := range refs { + res, err := c.Get(ctx, ref, false) + if err != nil { + if errors.Is(err, drivers.ErrResourceNotFound) { + return fmt.Errorf("dependency error: resource %q (%s) not found", ref.Name, ref.Kind) + } + return fmt.Errorf("dependency error: failed to get resource %q (%s): %w", ref.Name, ref.Kind, err) + } + if res.Meta.ReconcileStatus != runtimev1.ReconcileStatus_RECONCILE_STATUS_IDLE { + return fmt.Errorf("dependency error: resource %q (%s) is not idle", ref.Name, ref.Kind) + } + if res.Meta.ReconcileError != "" { + return fmt.Errorf("dependency error: resource %q (%s) has an error", ref.Name, ref.Kind) + } + } + return nil +} + // nextRefreshTime returns the earliest time AFTER t that the schedule should trigger. func nextRefreshTime(t time.Time, schedule *runtimev1.Schedule) (time.Time, error) { if schedule == nil { @@ -86,19 +107,20 @@ func olapDropTableIfExists(ctx context.Context, c *runtime.Controller, connector } _ = olap.Exec(ctx, &drivers.Statement{ - Query: fmt.Sprintf("DROP %s IF EXISTS %s", typ, safeSQLName(table)), - Priority: 100, + Query: fmt.Sprintf("DROP %s IF EXISTS %s", typ, safeSQLName(table)), + Priority: 100, + LongRunning: true, }) } -// olapRenameTable renames the table from oldName to newName in the OLAP connector. -// oldName must exist and newName must not exist. -func olapRenameTable(ctx context.Context, c *runtime.Controller, connector, oldName, newName string, view bool) error { - if oldName == "" || newName == "" { - return fmt.Errorf("cannot rename empty table name: oldName=%q, newName=%q", oldName, newName) +// olapForceRenameTable renames a table or view from fromName to toName in the OLAP connector. +// If a view or table already exists with toName, it is overwritten. +func olapForceRenameTable(ctx context.Context, c *runtime.Controller, connector, fromName string, fromIsView bool, toName string) error { + if fromName == "" || toName == "" { + return fmt.Errorf("cannot rename empty table name: fromName=%q, toName=%q", fromName, toName) } - if oldName == newName { + if fromName == toName { return nil } @@ -108,37 +130,60 @@ func olapRenameTable(ctx context.Context, c *runtime.Controller, connector, oldN } defer release() - var typ string - if view { - typ = "VIEW" - } else { - typ = "TABLE" - } + existingTo, _ := olap.InformationSchema().Lookup(ctx, toName) + + return olap.WithConnection(ctx, 100, true, true, func(ctx context.Context, ensuredCtx context.Context, conn *sql.Conn) error { + // Drop the existing table at toName + if existingTo != nil { + var typ string + if existingTo.View { + typ = "VIEW" + } else { + typ = "TABLE" + } + + err := olap.Exec(ctx, &drivers.Statement{ + Query: fmt.Sprintf("DROP %s IF EXISTS %s", typ, safeSQLName(existingTo.Name)), + }) + if err != nil { + return err + } + } + + // Infer SQL keyword for the table type + var typ string + if fromIsView { + typ = "VIEW" + } else { + typ = "TABLE" + } - // TODO: Use a transaction? - return olap.WithConnection(ctx, 100, func(ctx context.Context, ensuredCtx context.Context, conn *sql.Conn) error { // Renaming a table to the same name with different casing is not supported. Workaround by renaming to a temporary name first. 
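		// For example (illustrative names only): renaming "sales" to "Sales" is treated as a
		// rename to the same relation, so the code below first moves "sales" to a throwaway
		// temporary name and then renames that temporary table or view to "Sales".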
- if strings.EqualFold(oldName, newName) { - tmp := "__rill_tmp_rename_%s_" + typ + newName - err = olap.Exec(ctx, &drivers.Statement{Query: fmt.Sprintf("DROP %s IF EXISTS %s", typ, safeSQLName(tmp))}) + if strings.EqualFold(fromName, toName) { + tmpName := "__rill_tmp_rename_%s_" + typ + toName + err = olap.Exec(ctx, &drivers.Statement{Query: fmt.Sprintf("DROP %s IF EXISTS %s", typ, safeSQLName(tmpName))}) if err != nil { return err } err := olap.Exec(ctx, &drivers.Statement{ - Query: fmt.Sprintf("ALTER %s %s RENAME TO %s", typ, safeSQLName(oldName), safeSQLName(tmp)), - Priority: 100, + Query: fmt.Sprintf("ALTER %s %s RENAME TO %s", typ, safeSQLName(fromName), safeSQLName(tmpName)), }) if err != nil { return err } - oldName = tmp + fromName = tmpName } - return olap.Exec(ctx, &drivers.Statement{ - Query: fmt.Sprintf("ALTER TABLE %s RENAME TO %s", safeSQLName(oldName), safeSQLName(newName)), - Priority: 100, + // Do the rename + err = olap.Exec(ctx, &drivers.Statement{ + Query: fmt.Sprintf("ALTER %s %s RENAME TO %s", typ, safeSQLName(fromName), safeSQLName(toName)), }) + if err != nil { + return err + } + + return nil }) } diff --git a/runtime/server/connectors.go b/runtime/server/connectors.go index 2e6c2cb009f..4fb45d6e78a 100644 --- a/runtime/server/connectors.go +++ b/runtime/server/connectors.go @@ -5,10 +5,12 @@ import ( "fmt" runtimev1 "github.com/rilldata/rill/proto/gen/rill/runtime/v1" + "github.com/rilldata/rill/runtime/compilers/rillv1" "github.com/rilldata/rill/runtime/drivers" "github.com/rilldata/rill/runtime/drivers/bigquery" "github.com/rilldata/rill/runtime/drivers/gcs" "github.com/rilldata/rill/runtime/drivers/s3" + "golang.org/x/exp/maps" ) // ListConnectors implements RuntimeService. @@ -60,6 +62,36 @@ func (s *Server) ListConnectors(ctx context.Context, req *runtimev1.ListConnecto return &runtimev1.ListConnectorsResponse{Connectors: pbs}, nil } +func (s *Server) ScanConnectors(ctx context.Context, req *runtimev1.ScanConnectorsRequest) (*runtimev1.ScanConnectorsResponse, error) { + repo, release, err := s.runtime.Repo(ctx, req.InstanceId) + if err != nil { + return nil, err + } + defer release() + + p, err := rillv1.Parse(ctx, repo, req.InstanceId, nil) + if err != nil { + return nil, err + } + + connectors, err := p.AnalyzeConnectors(ctx) + if err != nil { + return nil, err + } + + cMap := make(map[string]*runtimev1.ScannedConnector, len(connectors)) + for _, connector := range connectors { + cMap[connector.Name] = &runtimev1.ScannedConnector{ + Name: connector.Name, + Type: connector.Driver, + HasAnonymousAccess: connector.AnonymousAccess, + } + } + return &runtimev1.ScanConnectorsResponse{ + Connectors: maps.Values(cMap), + }, nil +} + func (s *Server) S3ListBuckets(ctx context.Context, req *runtimev1.S3ListBucketsRequest) (*runtimev1.S3ListBucketsResponse, error) { s3Conn, release, err := s.getS3Conn(ctx, req.Connector, req.InstanceId) if err != nil { diff --git a/runtime/server/controller.go b/runtime/server/controller.go index 56e2818f9bf..a8f02ca3309 100644 --- a/runtime/server/controller.go +++ b/runtime/server/controller.go @@ -39,7 +39,7 @@ func (s *Server) ListResources(ctx context.Context, req *runtimev1.ListResources return nil, status.Error(codes.InvalidArgument, err.Error()) } - rs, err := ctrl.List(ctx, req.Kind) + rs, err := ctrl.List(ctx, req.Kind, false) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } @@ -66,7 +66,7 @@ func (s *Server) WatchResources(req *runtimev1.WatchResourcesRequest, ss runtime } if req.Replay { - 
rs, err := ctrl.List(ss.Context(), req.Kind) + rs, err := ctrl.List(ss.Context(), req.Kind, false) if err != nil { return status.Error(codes.InvalidArgument, err.Error()) } @@ -116,7 +116,7 @@ func (s *Server) GetResource(ctx context.Context, req *runtimev1.GetResourceRequ return nil, status.Error(codes.InvalidArgument, err.Error()) } - r, err := ctrl.Get(ctx, req.Name) + r, err := ctrl.Get(ctx, req.Name, false) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } diff --git a/runtime/server/instances.go b/runtime/server/instances.go index 3d07b21657b..8f1884a7ed3 100644 --- a/runtime/server/instances.go +++ b/runtime/server/instances.go @@ -206,6 +206,7 @@ func (s *Server) EditInstanceAnnotations(ctx context.Context, req *runtimev1.Edi EmbedCatalog: oldInst.EmbedCatalog, IngestionLimitBytes: oldInst.IngestionLimitBytes, Variables: oldInst.Variables, + Connectors: oldInst.Connectors, Annotations: req.Annotations, } diff --git a/runtime/services/catalog/artifacts/yaml/objects.go b/runtime/services/catalog/artifacts/yaml/objects.go index 1dc63af35fa..7b9e2f3681a 100644 --- a/runtime/services/catalog/artifacts/yaml/objects.go +++ b/runtime/services/catalog/artifacts/yaml/objects.go @@ -33,6 +33,7 @@ type Source struct { GlobMaxObjectsMatched int `yaml:"glob.max_objects_matched,omitempty" mapstructure:"glob.max_objects_matched,omitempty"` GlobMaxObjectsListed int64 `yaml:"glob.max_objects_listed,omitempty" mapstructure:"glob.max_objects_listed,omitempty"` GlobPageSize int `yaml:"glob.page_size,omitempty" mapstructure:"glob.page_size,omitempty"` + BatchSize string `yaml:"batch_size,omitempty" mapstructure:"batch_size,omitempty"` HivePartition *bool `yaml:"hive_partitioning,omitempty" mapstructure:"hive_partitioning,omitempty"` Timeout int32 `yaml:"timeout,omitempty"` Format string `yaml:"format,omitempty" mapstructure:"format,omitempty"` @@ -173,6 +174,10 @@ func fromSourceArtifact(source *Source, path string) (*drivers.CatalogEntry, err props["glob.page_size"] = source.GlobPageSize } + if source.BatchSize != "" { + props["batch_size"] = source.BatchSize + } + if source.S3Endpoint != "" { props["endpoint"] = source.S3Endpoint } diff --git a/runtime/services/catalog/migrator/sources/sources.go b/runtime/services/catalog/migrator/sources/sources.go index b4b55d85b16..c05b15fa36c 100644 --- a/runtime/services/catalog/migrator/sources/sources.go +++ b/runtime/services/catalog/migrator/sources/sources.go @@ -60,24 +60,18 @@ func (m *sourceMigrator) Update(ctx context.Context, return err } - return olap.WithConnection(ctx, 100, func(ctx, ensuredCtx context.Context, conn *sql.Conn) error { - tx, err := conn.BeginTx(ctx, nil) + return olap.WithConnection(ctx, 100, true, true, func(ctx, ensuredCtx context.Context, conn *sql.Conn) error { + _, err = conn.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", apiSource.Name)) if err != nil { return err } - defer func() { _ = tx.Rollback() }() - _, err = tx.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", apiSource.Name)) + _, err = conn.ExecContext(ctx, fmt.Sprintf("ALTER TABLE %s RENAME TO %s", tempName, apiSource.Name)) if err != nil { return err } - _, err = tx.ExecContext(ctx, fmt.Sprintf("ALTER TABLE %s RENAME TO %s", tempName, apiSource.Name)) - if err != nil { - return err - } - - return tx.Commit() + return nil }) } diff --git a/web-common/orval.config.ts b/web-common/orval.config.ts index d1e6d7c904e..95d52c2bfa8 100644 --- a/web-common/orval.config.ts +++ b/web-common/orval.config.ts @@ -43,6 +43,11 @@ export 
default defineConfig({ useQuery: true, }, }, + QueryService_MetricsViewAggregation: { + query: { + useQuery: true, + }, + }, QueryService_MetricsViewTotals: { query: { useQuery: true, diff --git a/web-common/src/components/menu/core/MenuItem.svelte b/web-common/src/components/menu/core/MenuItem.svelte index 42f1bd67d8a..6b7af375f3c 100644 --- a/web-common/src/components/menu/core/MenuItem.svelte +++ b/web-common/src/components/menu/core/MenuItem.svelte @@ -71,6 +71,12 @@ } let hovered = false; + function onMouseOver() { + if (!disabled) { + hovered = true; + } + } + function onFocus() { if (!disabled) { $currentItem = itemID; @@ -124,6 +130,7 @@ ? 'rgb(75, 85, 99)' : 'rgb(235, 235, 235)'}" class=" + w-full text-left py-1 {icon ? 'px-2' : 'px-3'} @@ -147,7 +154,7 @@ class:selected class:cursor-not-allowed={disabled} aria-disabled={disabled} - on:mouseover={onFocus} + on:mouseover={onMouseOver} on:mouseleave={onBlur} on:focus={onFocus} on:blur={() => { @@ -155,7 +162,7 @@ hovered = false; } }} - on:click|stopPropagation={handleClick} + on:click={handleClick} > {#if icon}