1. Preface
Please credit the original source when reposting; respect others' work!
Source code: https://github.com/nicktming/istio
Branch: tming-v1.3.6 (based on version 1.3.6)
1. [istio source analysis][galley] galley upstream (source)
2. [istio source analysis][galley] galley runtime
3. [istio source analysis][galley] galley downstream (mcp)
The previous articles analyzed the end-to-end flow of galley: the data galley obtains from its sources (fs, k8s) is ultimately pushed from the mcp server to the mcp clients. This article analyzes how the configController in pilot uses an mcp client to receive that data and how it processes it.
2. ConfigController
First, let's look at how configController is initialized in pilot.
// pilot/cmd/pilot-discovery/main.go
var (
    serverArgs = bootstrap.PilotArgs{
        CtrlZOptions:     ctrlz.DefaultOptions(),
        KeepaliveOptions: keepalive.DefaultOption(),
    }
    ...
)
...
discoveryServer, err := bootstrap.NewServer(serverArgs)
...
// pilot/pkg/bootstrap/server.go
func NewServer(args PilotArgs) (*Server, error) {
    if err := s.initMesh(&args); err != nil {
        return nil, fmt.Errorf("mesh: %v", err)
    }
    ...
    if err := s.initConfigController(&args); err != nil {
        return nil, fmt.Errorf("config controller: %v", err)
    }
}
Code that does not affect the analysis has been trimmed.
func (s *Server) initConfigController(args *PilotArgs) error {
    if len(s.mesh.ConfigSources) > 0 {
        // if config sources are configured, set up the mcp client
        if err := s.initMCPConfigController(args); err != nil {
            return err
        }
    }
    ...
    // Create the config store.
    s.istioConfigStore = model.MakeIstioStore(s.configController)
    return nil
}
1. As you can see, s.istioConfigStore is essentially just s.configController (see the sketch below).
2. We mainly focus on the mcp configuration here; for the mesh configuration, refer to the earlier article [istio source analysis] on a simple install of a debuggable istio source build.
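For reference, here is a minimal paraphrase of model.MakeIstioStore (from pilot/pkg/model/config.go in 1.3.x): it wraps the store without copying any data, which is why the two fields are effectively the same object.
// Paraphrased from pilot/pkg/model/config.go: istioConfigStore only adds
// typed convenience accessors (VirtualServices, Gateways, ...) on top of
// the generic ConfigStore it embeds.
type istioConfigStore struct {
    ConfigStore // here this is s.configController
}

// MakeIstioStore copies nothing; reads go straight through to the controller.
func MakeIstioStore(store ConfigStore) IstioConfigStore {
    return &istioConfigStore{store}
}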
2.1 initMCPConfigController
func (s *Server) initMCPConfigController(args *PilotArgs) error {
    clientNodeID := ""
    collections := make([]sink.CollectionOptions, len(model.IstioConfigTypes))
    for i, t := range model.IstioConfigTypes {
        // all istio CRD resources; no native k8s resources such as Pod, Service, etc.
        collections[i] = sink.CollectionOptions{Name: t.Collection, Incremental: false}
    }
    options := coredatamodel.Options{
        DomainSuffix: args.Config.ControllerOptions.DomainSuffix,
        // used later
        ClearDiscoveryServerCache: func() {
            s.EnvoyXdsServer.ConfigUpdate(&model.PushRequest{Full: true})
        },
    }
    ...
    for _, configSource := range s.mesh.ConfigSources {
        if strings.Contains(configSource.Address, fsScheme+"://") {
            ...
        }
        // configure secure access; used later when analyzing policy
        securityOption := grpc.WithInsecure()
        if configSource.TlsSettings != nil &&
            configSource.TlsSettings.Mode != istio_networking_v1alpha3.TLSSettings_DISABLE {
            ...
        }
        ...
        conn, err := grpc.DialContext(
            ctx, configSource.Address,
            securityOption, msgSizeOption, keepaliveOption, initialWindowSizeOption, initialConnWindowSizeOption)
        ...
        // create a controller
        mcpController := coredatamodel.NewController(options)
        sinkOptions := &sink.Options{
            CollectionOptions: collections,
            Updater:           mcpController,
            ID:                clientNodeID,
            Reporter:          reporter,
        }
        // create the mcp client
        cl := mcpapi.NewResourceSourceClient(conn)
        mcpClient := sink.NewClient(cl, sinkOptions)
        configz.Register(mcpClient)
        clients = append(clients, mcpClient)
        conns = append(conns, conn)
        // add this controller to configStores
        configStores = append(configStores, mcpController)
    }
    ...
    // Wrap the config controller with a cache.
    aggregateMcpController, err := configaggregate.MakeCache(configStores)
    if err != nil {
        return err
    }
    s.configController = aggregateMcpController
    return nil
}
1. Note options.ClearDiscoveryServerCache; it is used later.
2. coredatamodel.NewController(options) creates a controller.
3. sink.NewClient(cl, sinkOptions) creates an mcp client; note that sinkOptions.Updater is the controller created in step 2. The mcp server side lives in galley.
4. configaggregate.MakeCache(configStores) groups all the controllers by the collections they support (for example, which controllers serve virtualService); a simplified sketch follows.
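The grouping idea behind configaggregate.MakeCache can be sketched as follows (a simplified illustration, not the exact istio code): index each store by the config types it declares, then route reads to whichever store owns the type.
// Simplified sketch of the aggregation idea (illustration only):
// every underlying store declares its ConfigDescriptor; the aggregate
// indexes stores by type and routes Get/List to whichever store owns it.
type aggregateStore struct {
    // type name (e.g. "virtual-service") -> store serving that type
    stores map[string]model.ConfigStore
}

func (a *aggregateStore) Get(typ, name, namespace string) *model.Config {
    if s, ok := a.stores[typ]; ok {
        return s.Get(typ, name, namespace)
    }
    return nil
}

func (a *aggregateStore) List(typ, namespace string) ([]model.Config, error) {
    s, ok := a.stores[typ]
    if !ok {
        return nil, fmt.Errorf("missing type %q", typ)
    }
    return s.List(typ, namespace)
}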
2.2 mcp client
// pkg/mcp/sink/client_sink.go
func NewClient(client mcp.ResourceSourceClient, options *Options) *Client {
    return &Client{
        Sink:     New(options),
        reporter: options.Reporter,
        client:   client,
    }
}
// pkg/mcp/sink/sink.go
func New(options *Options) *Sink { // nolint: lll
    nodeInfo := &mcp.SinkNode{
        Id:          options.ID,
        Annotations: options.Metadata,
    }
    state := make(map[string]*perCollectionState)
    // state comes from options.CollectionOptions
    for _, collection := range options.CollectionOptions {
        state[collection.Name] = &perCollectionState{
            versions:           make(map[string]string),
            requestIncremental: collection.Incremental,
        }
    }
    return &Sink{
        ...
    }
}
Note the following here:
1. The state in Sink comes from options.CollectionOptions, which traces back to model.IstioConfigTypes in initMCPConfigController:
IstioConfigTypes = ConfigDescriptor{
    VirtualService,
    Gateway,
    ServiceEntry,
    DestinationRule,
    EnvoyFilter,
    Sidecar,
    HTTPAPISpec,
    HTTPAPISpecBinding,
    QuotaSpec,
    QuotaSpecBinding,
    AuthenticationPolicy,
    AuthenticationMeshPolicy,
    ServiceRole,
    ServiceRoleBinding,
    RbacConfig,
    ClusterRbacConfig,
}
As you can see, model.IstioConfigTypes contains only istio CRD resources; in other words, the config resources received from galley are all of these types, and none are native k8s resources such as Pod. The sketch below shows the per-collection state the sink keeps for each of them.
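Each entry above is a model.ProtoSchema whose Collection field names an MCP collection; for VirtualService that is "istio/networking/v1alpha3/virtualservices". The sink then keeps one perCollectionState per collection name (paraphrased from pkg/mcp/sink/sink.go):
// Paraphrased from pkg/mcp/sink/sink.go: bookkeeping kept per collection,
// keyed by names like "istio/networking/v1alpha3/virtualservices".
type perCollectionState struct {
    // resource versions that have been ACK'd, keyed by resource name
    versions map[string]string
    // whether the incremental MCP protocol is requested for this collection
    requestIncremental bool
}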
2.3 mcp client Run
// pkg/mcp/sink/client_sink.go
func (c *Client) Run(ctx context.Context) {
    ...
    for {
        // establish the connection
        for {
            ...
            stream, err := c.client.EstablishResourceStream(ctx)
            ...
        }
        // process the stream
        err := c.ProcessStream(c.stream)
        ...
    }
}
// pkg/mcp/sink/sink.go
func (sink *Sink) ProcessStream(stream Stream) error {
    // send initial requests for each supported type
    initialRequests := sink.createInitialRequests()
    for {
        var req *mcp.RequestResources
        if len(initialRequests) > 0 {
            // send the initial requests first
            req = initialRequests[0]
            initialRequests = initialRequests[1:]
        } else {
            // receive a response from the server
            resources, err := stream.Recv()
            if err != nil {
                if err != io.EOF {
                    sink.reporter.RecordRecvError(err, status.Code(err))
                    scope.Errorf("Error receiving MCP resource: %v", err)
                }
                return err
            }
            // the client must reply with an ACK/NACK, so after processing
            // the response a new request is assembled
            req = sink.handleResponse(resources)
        }
        sink.journal.RecordRequestResources(req)
        // send the request to the server
        if err := stream.Send(req); err != nil {
            sink.reporter.RecordSendError(err, status.Code(err))
            scope.Errorf("Error sending MCP request: %v", err)
            return err
        }
    }
}
The interaction between the client and server in mcp was already introduced in [istio source analysis][galley] galley downstream (mcp); here it is summarized again. For mcp itself, see https://github.com/istio/api/tree/master/mcp; the figure below helps build understanding.
(figure: mcp.png — the MCP client/server exchange)
Comparing that figure with ProcessStream:
1. The client sends an initial request for each supported type (initialRequests).
2. The server returns a response.
3. The client must reply with an ACK/NACK; that is why sink.handleResponse(resources) in ProcessStream, after processing the response, constructs a new request to send back to the server. (The abridged message definitions follow.)
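For reference, these are the two wire messages involved, abridged from the MCP protos in istio/api (only the fields used in this article are shown):
// Abridged from the MCP protos (istio/api, mcp/v1alpha1). The sink sends
// RequestResources; the source answers with Resources.
type RequestResources struct {
    SinkNode                *SinkNode         // identifies the client
    Collection              string            // e.g. "istio/networking/v1alpha3/virtualservices"
    InitialResourceVersions map[string]string // versions already held (incremental mode)
    ResponseNonce           string            // echoes Resources.Nonce, making this an ACK/NACK
    ErrorDetail             *status.Status    // non-nil turns the ACK into a NACK
    Incremental             bool
}

type Resources struct {
    SystemVersionInfo string     // source-side version of the config state
    Collection        string
    Resources         []Resource // resource payloads (full set, or deltas in incremental mode)
    RemovedResources  []string   // names removed (incremental mode only)
    Incremental       bool
    Nonce             string     // must be echoed back by the sink
}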
So let's first see which collections are sent:
func (sink *Sink) createInitialRequests() []*mcp.RequestResources {
    sink.mu.Lock()
    initialRequests := make([]*mcp.RequestResources, 0, len(sink.state))
    // sink.state comes from model.IstioConfigTypes in initMCPConfigController
    for collection, state := range sink.state {
        var initialResourceVersions map[string]string
        if state.requestIncremental {
            ...
        }
        req := &mcp.RequestResources{
            SinkNode:                sink.nodeInfo,
            Collection:              collection,
            InitialResourceVersions: initialResourceVersions,
            Incremental:             state.requestIncremental,
        }
        initialRequests = append(initialRequests, req)
    }
    sink.mu.Unlock()
    return initialRequests
}
As you can see, the collections sent are exactly those in model.IstioConfigTypes from initMCPConfigController. For instance:
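The very first request for the VirtualService collection would look roughly like this (values shown for illustration, based on the code above):
// Illustrative first request for one collection (non-incremental mode):
req := &mcp.RequestResources{
    SinkNode:   sink.nodeInfo,
    Collection: "istio/networking/v1alpha3/virtualservices",
    // InitialResourceVersions stays empty on a fresh connect
    Incremental: false,
}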
2.4 handleResponse
func (sink *Sink) handleResponse(resources *mcp.Resources) *mcp.RequestResources {
    if handleResponseDoneProbe != nil {
        defer handleResponseDoneProbe()
    }
    // must be a supported collection
    state, ok := sink.state[resources.Collection]
    if !ok {
        errDetails := status.Errorf(codes.Unimplemented, "unsupported collection %v", resources.Collection)
        return sink.sendNACKRequest(resources, errDetails)
    }
    change := &Change{
        Collection:        resources.Collection,
        Objects:           make([]*Object, 0, len(resources.Resources)),
        Removed:           resources.RemovedResources,
        Incremental:       resources.Incremental,
        SystemVersionInfo: resources.SystemVersionInfo,
    }
    for _, resource := range resources.Resources {
        var dynamicAny types.DynamicAny
        if err := types.UnmarshalAny(resource.Body, &dynamicAny); err != nil {
            return sink.sendNACKRequest(resources, err)
        }
        // TODO - use galley metadata to verify collection and type_url match?
        object := &Object{
            TypeURL:  resource.Body.TypeUrl,
            Metadata: resource.Metadata,
            Body:     dynamicAny.Message,
        }
        change.Objects = append(change.Objects, object)
    }
    if err := sink.updater.Apply(change); err != nil {
        // send a NACK
        errDetails := status.Error(codes.InvalidArgument, err.Error())
        return sink.sendNACKRequest(resources, errDetails)
    }
    ...
    // ACK
    sink.reporter.RecordRequestAck(resources.Collection, 0)
    req := &mcp.RequestResources{
        SinkNode:      sink.nodeInfo,
        Collection:    resources.Collection,
        ResponseNonce: resources.Nonce,
        Incremental:   useIncremental,
    }
    return req
}
1. The data in the response is assembled into a change, and that change is passed to the sink.updater.Apply method.
2. Any error causes a NACK to be sent to the server; if there is no error, an ACK is sent instead (see the paraphrase of sendNACKRequest below).
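The only difference between an ACK and a NACK is whether the request carries an error detail, as this paraphrase of sendNACKRequest from pkg/mcp/sink/sink.go shows:
// Paraphrased from pkg/mcp/sink/sink.go: a NACK echoes the response
// nonce just like an ACK, but attaches the error detail.
func (sink *Sink) sendNACKRequest(response *mcp.Resources, err error) *mcp.RequestResources {
    errorDetails, _ := status.FromError(err)
    return &mcp.RequestResources{
        SinkNode:      sink.nodeInfo,
        Collection:    response.Collection,
        ResponseNonce: response.Nonce,
        ErrorDetail:   errorDetails.Proto(),
    }
}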
So what is sink.updater? In initMCPConfigController you can see:
options := coredatamodel.Options{
    DomainSuffix: args.Config.ControllerOptions.DomainSuffix,
    // used later
    ClearDiscoveryServerCache: func() {
        s.EnvoyXdsServer.ConfigUpdate(&model.PushRequest{Full: true})
    },
}
...
mcpController := coredatamodel.NewController(options)
sinkOptions := &sink.Options{
    CollectionOptions: collections,
    Updater:           mcpController,
    ID:                clientNodeID,
    Reporter:          reporter,
}
// create the mcp client
cl := mcpapi.NewResourceSourceClient(conn)
mcpClient := sink.NewClient(cl, sinkOptions)
So sink.updater is the mcpController.
3. Controller
func NewController(options Options) CoreDataModel {
    descriptorsByMessageName := make(map[string]model.ProtoSchema, len(model.IstioConfigTypes))
    synced := make(map[string]bool)
    for _, descriptor := range model.IstioConfigTypes {
        // don't register duplicate descriptors for the same collection
        if _, ok := descriptorsByMessageName[descriptor.Collection]; !ok {
            descriptorsByMessageName[descriptor.Collection] = descriptor
            synced[descriptor.Collection] = false
        }
    }
    return &Controller{
        ...
    }
}
Just note how descriptorsByMessageName is built: it is keyed by collection name and is stored as the controller's descriptorsByCollection field, which Apply consults below.
3.1 Apply
func (c *Controller) Apply(change *sink.Change) error {
    descriptor, ok := c.descriptorsByCollection[change.Collection]
    if !ok {
        return fmt.Errorf("apply type not supported %s", change.Collection)
    }
    schema, valid := c.ConfigDescriptor().GetByType(descriptor.Type)
    if !valid {
        return fmt.Errorf("descriptor type not supported %s", descriptor.Type)
    }
    c.syncedMu.Lock()
    c.synced[change.Collection] = true
    c.syncedMu.Unlock()
    // innerStore is [namespace][name]
    innerStore := make(map[string]map[string]*model.Config)
    // build innerStore from the information in the change
    for _, obj := range change.Objects {
        // construct the innerStore entries
    }
    var prevStore map[string]map[string]*model.Config
    c.configStoreMu.Lock()
    prevStore = c.configStore[descriptor.Type]
    c.configStore[descriptor.Type] = innerStore
    c.configStoreMu.Unlock()
    if descriptor.Type == model.ServiceEntry.Type {
        c.serviceEntryEvents(innerStore, prevStore)
    } else {
        c.options.ClearDiscoveryServerCache()
    }
    return nil
}
1. innerStore is built from the change, and that type's content in c.configStore is replaced with it.
2. Dispatch is then driven by comparing the old content prevStore with the new content innerStore.
2.1 For ServiceEntry, the serviceEntryEvents method is called:
func (c *Controller) serviceEntryEvents(currentStore, prevStore map[string]map[string]*model.Config) {
    dispatch := func(model model.Config, event model.Event) {}
    if handlers, ok := c.eventHandlers[model.ServiceEntry.Type]; ok {
        dispatch = func(model model.Config, event model.Event) {
            log.Debugf("MCP event dispatch: key=%v event=%v", model.Key(), event.String())
            for _, handler := range handlers {
                handler(model, event)
            }
        }
    }
    // add/update
    for namespace, byName := range currentStore {
        for name, config := range byName {
            if prevByNamespace, ok := prevStore[namespace]; ok {
                if prevConfig, ok := prevByNamespace[name]; ok {
                    if config.ResourceVersion != prevConfig.ResourceVersion {
                        dispatch(*config, model.EventUpdate)
                    }
                } else {
                    dispatch(*config, model.EventAdd)
                }
            } else {
                dispatch(*config, model.EventAdd)
            }
        }
    }
    ...
}
func (c *Controller) RegisterEventHandler(typ string, handler func(model.Config, model.Event)) {
    c.eventHandlers[typ] = append(c.eventHandlers[typ], handler)
}
These generated events are processed by the handlers registered in advance; a hypothetical subscription is sketched below.
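As an illustration only (a hypothetical handler, not pilot's actual registration code), a consumer interested in ServiceEntry changes would subscribe like this:
// Hypothetical usage sketch: subscribing to ServiceEntry events.
// Pilot's real handlers are registered by its service registry code.
mcpController.RegisterEventHandler(model.ServiceEntry.Type,
    func(config model.Config, event model.Event) {
        // react to add/update/delete of a ServiceEntry here
        log.Infof("service entry %s/%s changed: %v", config.Namespace, config.Name, event)
    })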
2.2 For types other than ServiceEntry, the ClearDiscoveryServerCache method is called:
s.EnvoyXdsServer.ConfigUpdate(&model.PushRequest{Full: true})
This triggers a full xDS push, so its analysis is deferred to a later article.
4. Summary
(figure: pilot.png — pilot receiving config from galley over MCP)
After data is received from the mcp server, handleResponse calls the controller's Apply method, which processes the change according to its type; once processing is done, an ACK/NACK is returned to the server.
5. References
1. istio 1.3.6 source code
2. https://cloud.tencent.com/developer/article/1409159