// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.

// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package ddl

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/google/uuid"
	"github.com/ngaut/pools"
	"github.com/pingcap/errors"
	"github.com/pingcap/failpoint"
	"github.com/pingcap/parser/ast"
	"github.com/pingcap/parser/model"
	"github.com/pingcap/parser/mysql"
	pumpcli "github.com/pingcap/tidb-tools/tidb-binlog/pump_client"
	"github.com/pingcap/tidb/config"
	"github.com/pingcap/tidb/ddl/util"
	"github.com/pingcap/tidb/infoschema"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/meta"
	"github.com/pingcap/tidb/metrics"
	"github.com/pingcap/tidb/owner"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/sessionctx/binloginfo"
	"github.com/pingcap/tidb/sessionctx/variable"
	"github.com/pingcap/tidb/table"
	goutil "github.com/pingcap/tidb/util"
	"github.com/pingcap/tidb/util/logutil"
	"go.uber.org/zap"
)

const (
	// currentVersion is for all new DDL jobs.
	currentVersion = 1
	// DDLOwnerKey is the ddl owner path that is saved to etcd, and it's exported for testing.
	DDLOwnerKey = "/tidb/ddl/fg/owner"
	ddlPrompt   = "ddl"

	shardRowIDBitsMax = 15

	batchAddingJobs = 10

	// PartitionCountLimit is the limit on the number of partitions in a table.
	// MySQL allows at most 8192 partitions; our maximum is 1024.
	// See https://dev.mysql.com/doc/refman/5.7/en/partitioning-limitations.html.
	PartitionCountLimit = 1024
)

// OnExist specifies what to do when a new object has a name collision.
type OnExist uint8

const (
	// OnExistError throws an error on name collision.
	OnExistError OnExist = iota
	// OnExistIgnore skips creating the new object.
	OnExistIgnore
	// OnExistReplace replaces the old object with the new object. This is only
	// supported by VIEWs at the moment. For other object types, this is
	// equivalent to OnExistError.
	OnExistReplace
)
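
// The OnExist values above map naturally onto SQL statement modifiers. The
// sketch below is illustrative only (the statement fields and variable names
// are assumptions, not code in this file): a caller that has already built a
// *model.TableInfo would pick the policy from the parsed statement before
// handing the info to CreateTableWithInfo.
//
//	onExist := OnExistError
//	if stmt.IfNotExists { // CREATE TABLE IF NOT EXISTS ...
//		onExist = OnExistIgnore
//	}
//	if stmt.OrReplace { // CREATE OR REPLACE VIEW ... (views only)
//		onExist = OnExistReplace
//	}
//	err := d.CreateTableWithInfo(ctx, schema, tblInfo, onExist, false)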

var (
	// TableColumnCountLimit is the limit on the number of columns in a table.
	// It's exported for testing.
	TableColumnCountLimit = uint32(512)
	// EnableSplitTableRegion is a flag to decide whether to split a new region for
	// a newly created table. It takes effect only if the Storage supports split
	// region.
	EnableSplitTableRegion = uint32(0)
)
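
// EnableSplitTableRegion is a plain uint32 so that callers can toggle it
// without extra locking. A minimal sketch, assuming the conventional
// sync/atomic access pattern for such flags (written from a caller's point of
// view; not a quote of this package's callers):
//
//	atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) // turn pre-split on
//	if atomic.LoadUint32(&ddl.EnableSplitTableRegion) == 1 {
//		// split a region for the newly created table
//	}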

// DDL is responsible for updating the schema in the data store and maintaining the in-memory InfoSchema cache.
type DDL interface {
	CreateSchema(ctx sessionctx.Context, name model.CIStr, charsetInfo *ast.CharsetOpt) error
	AlterSchema(ctx sessionctx.Context, stmt *ast.AlterDatabaseStmt) error
	DropSchema(ctx sessionctx.Context, schema model.CIStr) error
	CreateTable(ctx sessionctx.Context, stmt *ast.CreateTableStmt) error
	CreateView(ctx sessionctx.Context, stmt *ast.CreateViewStmt) error
	DropTable(ctx sessionctx.Context, tableIdent ast.Ident) (err error)
	RecoverTable(ctx sessionctx.Context, recoverInfo *RecoverInfo) (err error)
	DropView(ctx sessionctx.Context, tableIdent ast.Ident) (err error)
	CreateIndex(ctx sessionctx.Context, tableIdent ast.Ident, keyType ast.IndexKeyType, indexName model.CIStr,
		columnNames []*ast.IndexPartSpecification, indexOption *ast.IndexOption, ifNotExists bool) error
	DropIndex(ctx sessionctx.Context, tableIdent ast.Ident, indexName model.CIStr, ifExists bool) error
	AlterTable(ctx sessionctx.Context, tableIdent ast.Ident, spec []*ast.AlterTableSpec) error
	TruncateTable(ctx sessionctx.Context, tableIdent ast.Ident) error
	RenameTable(ctx sessionctx.Context, oldTableIdent, newTableIdent ast.Ident, isAlterTable bool) error
	LockTables(ctx sessionctx.Context, stmt *ast.LockTablesStmt) error
	UnlockTables(ctx sessionctx.Context, lockedTables []model.TableLockTpInfo) error
	CleanupTableLock(ctx sessionctx.Context, tables []*ast.TableName) error
	UpdateTableReplicaInfo(ctx sessionctx.Context, physicalID int64, available bool) error
	RepairTable(ctx sessionctx.Context, table *ast.TableName, createStmt *ast.CreateTableStmt) error
	CreateSequence(ctx sessionctx.Context, stmt *ast.CreateSequenceStmt) error
	DropSequence(ctx sessionctx.Context, tableIdent ast.Ident, ifExists bool) (err error)

	// CreateSchemaWithInfo creates a database (schema) given its database info.
	//
	// If `tryRetainID` is true, this method will try to keep the database ID specified in
	// `info` rather than generating a new one. This is just a hint though; if the ID collides
	// with an existing database, a new ID will always be used.
	//
	// WARNING: the DDL owns the `info` after calling this function, and will modify its fields
	// in-place. If you want to keep using `info`, please call Clone() first.
	CreateSchemaWithInfo(
		ctx sessionctx.Context,
		info *model.DBInfo,
		onExist OnExist,
		tryRetainID bool) error

	// CreateTableWithInfo creates a table, view or sequence given its table info.
	//
	// If `tryRetainID` is true, this method will try to keep the table ID specified in `info`
	// rather than generating a new one. This is just a hint though; if the ID collides with an
	// existing table, a new ID will always be used.
	//
	// WARNING: the DDL owns the `info` after calling this function, and will modify its fields
	// in-place. If you want to keep using `info`, please call Clone() first.
	CreateTableWithInfo(
		ctx sessionctx.Context,
		schema model.CIStr,
		info *model.TableInfo,
		onExist OnExist,
		tryRetainID bool) error

	// Start campaigns to be the owner and starts the workers.
	// ctxPool is used by the workers' delRangeManager to create sessions.
	Start(ctxPool *pools.ResourcePool) error
	// GetLease returns the current schema lease time.
	GetLease() time.Duration
	// Stats returns the DDL statistics.
	Stats(vars *variable.SessionVars) (map[string]interface{}, error)
	// GetScope gets the status variables scope.
	GetScope(status string) variable.ScopeFlag
	// Stop stops the DDL worker.
	Stop() error
	// RegisterEventCh registers the event channel for ddl.
	RegisterEventCh(chan<- *util.Event)
	// SchemaSyncer gets the schema syncer.
	SchemaSyncer() util.SchemaSyncer
	// OwnerManager gets the owner manager.
	OwnerManager() owner.Manager
	// GetID gets the ddl ID.
	GetID() string
	// GetTableMaxRowID gets the max row ID of a normal table or a partition.
	GetTableMaxRowID(startTS uint64, tbl table.PhysicalTable) (int64, bool, error)
	// SetBinlogClient sets the binlog client for the DDL worker. It's exported for testing.
	SetBinlogClient(*pumpcli.PumpsClient)
	// GetHook gets the hook. It's exported for testing.
	GetHook() Callback
}
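
// The WARNING on CreateSchemaWithInfo/CreateTableWithInfo above is easy to
// miss: the DDL takes ownership of the passed-in info and mutates it. A hedged
// sketch of the safe pattern (tblInfo and its later reuse are hypothetical,
// not code in this package):
//
//	infoForDDL := tblInfo.Clone() // keep tblInfo usable after the call
//	// tryRetainID=true asks the DDL to keep tblInfo's ID, but it is only a
//	// hint; a colliding ID is replaced with a freshly generated one.
//	if err := d.CreateTableWithInfo(ctx, schema, infoForDDL, OnExistIgnore, true); err != nil {
//		return err
//	}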

type limitJobTask struct {
	job *model.Job
	err chan error
}

// ddl is used to handle the statements that define the structure or schema of the database.
type ddl struct {
	m          sync.RWMutex
	quitCh     chan struct{}
	wg         sync.WaitGroup // It's only used to deal with data race in state_test and schema_test.
	limitJobCh chan *limitJobTask

	*ddlCtx
	workers     map[workerType]*worker
	sessPool    *sessionPool
	delRangeMgr delRangeManager
}

// ddlCtx is the context when we use worker to handle DDL jobs.
type ddlCtx struct {
	uuid         string
	store        kv.Storage
	ownerManager owner.Manager
	schemaSyncer util.SchemaSyncer
	ddlJobDoneCh chan struct{}
	ddlEventCh   chan<- *util.Event
	lease        time.Duration        // lease is the schema lease.
	binlogCli    *pumpcli.PumpsClient // binlogCli is used for Binlog.
	infoHandle   *infoschema.Handle
	tableLockCkr util.DeadTableLockChecker

	// hook may be modified.
	mu struct {
		sync.RWMutex
		hook        Callback
		interceptor Interceptor
	}
}

func (dc *ddlCtx) isOwner() bool {
	isOwner := dc.ownerManager.IsOwner()
	logutil.BgLogger().Debug("[ddl] check whether is the DDL owner", zap.Bool("isOwner", isOwner), zap.String("selfID", dc.uuid))
	if isOwner {
		metrics.DDLCounter.WithLabelValues(metrics.DDLOwner + "_" + mysql.TiDBReleaseVersion).Inc()
	}
	return isOwner
}

// RegisterEventCh registers the passed channel for DDL events.
func (d *ddl) RegisterEventCh(ch chan<- *util.Event) {
	d.ddlEventCh = ch
}

// asyncNotifyEvent notifies a DDL event to the outside world, e.g. the statistics handle.
// When the channel stays full, we may give up the notification and log it.
func asyncNotifyEvent(d *ddlCtx, e *util.Event) {
	if d.ddlEventCh != nil {
		if d.lease == 0 {
			// A lease of 0 is only used in tests.
			select {
			case d.ddlEventCh <- e:
			default:
			}
			return
		}
		for i := 0; i < 10; i++ {
			select {
			case d.ddlEventCh <- e:
				return
			default:
				logutil.BgLogger().Warn("[ddl] fail to notify DDL event", zap.String("event", e.String()))
				time.Sleep(time.Microsecond * 10)
			}
		}
	}
}
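
// A hedged sketch of the consumer side of RegisterEventCh/asyncNotifyEvent.
// The channel and drain loop below are illustrative assumptions (the real
// consumer is the statistics handle, which lives outside this file); the point
// is that the channel should be buffered and drained promptly, because
// asyncNotifyEvent only retries a few times before dropping the event.
//
//	eventCh := make(chan *util.Event, 100)
//	d.RegisterEventCh(eventCh)
//	go func() {
//		for e := range eventCh {
//			// react to schema changes, e.g. invalidate cached statistics
//			_ = e
//		}
//	}()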

// NewDDL creates a new DDL.
func NewDDL(ctx context.Context, options ...Option) DDL {
	return newDDL(ctx, options...)
}

func newDDL(ctx context.Context, options ...Option) *ddl {
	opt := &Options{
		Hook: &BaseCallback{},
	}
	for _, o := range options {
		o(opt)
	}

	id := uuid.New().String()
	var manager owner.Manager
	var syncer util.SchemaSyncer
	var deadLockCkr util.DeadTableLockChecker
	if etcdCli := opt.EtcdCli; etcdCli == nil {
		// The etcdCli is nil if the store is a localstore, which is only used for testing.
		// In that case we use a mock owner manager and MockSchemaSyncer.
		manager = owner.NewMockManager(ctx, id)
		syncer = NewMockSchemaSyncer()
	} else {
		manager = owner.NewOwnerManager(ctx, etcdCli, ddlPrompt, id, DDLOwnerKey)
		syncer = util.NewSchemaSyncer(etcdCli, id, manager)
		deadLockCkr = util.NewDeadTableLockChecker(etcdCli)
	}

	ddlCtx := &ddlCtx{
		uuid:         id,
		store:        opt.Store,
		lease:        opt.Lease,
		ddlJobDoneCh: make(chan struct{}, 1),
		ownerManager: manager,
		schemaSyncer: syncer,
		binlogCli:    binloginfo.GetPumpsClient(),
		infoHandle:   opt.InfoHandle,
		tableLockCkr: deadLockCkr,
	}
	ddlCtx.mu.hook = opt.Hook
	ddlCtx.mu.interceptor = &BaseInterceptor{}
	d := &ddl{
		ddlCtx:     ddlCtx,
		limitJobCh: make(chan *limitJobTask, batchAddingJobs),
	}

	return d
}
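
// A hedged sketch of constructing and running a DDL instance. The option
// constructors (WithEtcdClient, WithStore, WithInfoHandle, WithLease) are
// assumed to be the functional options that populate the Options struct used
// above; the store, session pool, and lease values are placeholders.
//
//	d := ddl.NewDDL(ctx,
//		ddl.WithEtcdClient(etcdCli), // a nil client selects the mock owner/syncer path
//		ddl.WithStore(store),
//		ddl.WithInfoHandle(infoHandle),
//		ddl.WithLease(45*time.Second),
//	)
//	if err := d.Start(sessionPool); err != nil {
//		return err
//	}
//	defer func() { _ = d.Stop() }()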

// Stop implements DDL.Stop interface.
func (d *ddl) Stop() error {
	d.m.Lock()
	defer d.m.Unlock()

	d.close()
	logutil.BgLogger().Info("[ddl] stop DDL", zap.String("ID", d.uuid))
	return nil
}

func (d *ddl) newDeleteRangeManager(mock bool) delRangeManager {
	var delRangeMgr delRangeManager
	if !mock {
		delRangeMgr = newDelRangeManager(d.store, d.sessPool)
		logutil.BgLogger().Info("[ddl] start delRangeManager OK", zap.Bool("is a emulator", !d.store.SupportDeleteRange()))
	} else {
		delRangeMgr = newMockDelRangeManager()
	}

	delRangeMgr.start()
	return delRangeMgr
}

// Start implements DDL.Start interface.
func (d *ddl) Start(ctxPool *pools.ResourcePool) error {
	logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid), zap.Bool("runWorker", RunWorker))
	d.quitCh = make(chan struct{})

	d.wg.Add(1)
	go d.limitDDLJobs()

	// If RunWorker is true, we need to campaign for the owner and handle DDL jobs.
	// Otherwise, we don't.
	if RunWorker {
		err := d.ownerManager.CampaignOwner()
		if err != nil {
			return errors.Trace(err)
		}

		d.workers = make(map[workerType]*worker, 2)
		d.sessPool = newSessionPool(ctxPool)
		d.delRangeMgr = d.newDeleteRangeManager(ctxPool == nil)
		d.workers[generalWorker] = newWorker(generalWorker, d.store, d.sessPool, d.delRangeMgr)
		d.workers[addIdxWorker] = newWorker(addIdxWorker, d.store, d.sessPool, d.delRangeMgr)
		for _, worker := range d.workers {
			worker.wg.Add(1)
			w := worker
			go w.start(d.ddlCtx)

			metrics.DDLCounter.WithLabelValues(fmt.Sprintf("%s_%s", metrics.CreateDDL, worker.String())).Inc()

			// When Start is called, send a fake job so that the worker checks
			// ownership first and then looks for an existing job to run.
			asyncNotify(worker.ddlJobCh)
		}

		go d.schemaSyncer.StartCleanWork()
		if config.TableLockEnabled() {
			d.wg.Add(1)
			go d.startCleanDeadTableLock()
		}
		metrics.DDLCounter.WithLabelValues(metrics.StartCleanWork).Inc()
	}

	variable.RegisterStatistics(d)

	metrics.DDLCounter.WithLabelValues(metrics.CreateDDLInstance).Inc()
	return nil
}

func (d *ddl) close() {
	if isChanClosed(d.quitCh) {
		return
	}

	startTime := time.Now()
	close(d.quitCh)
	d.wg.Wait()
	d.ownerManager.Cancel()
	d.schemaSyncer.CloseCleanWork()
	err := d.schemaSyncer.RemoveSelfVersionPath()
	if err != nil {
		logutil.BgLogger().Error("[ddl] remove self version path failed", zap.Error(err))
	}

	for _, worker := range d.workers {
		worker.close()
	}
	// d.delRangeMgr uses sessions from d.sessPool.
	// Clear it before d.sessPool.close to reduce the time spent by d.sessPool.close.
	if d.delRangeMgr != nil {
		d.delRangeMgr.clear()
	}
	if d.sessPool != nil {
		d.sessPool.close()
	}

	logutil.BgLogger().Info("[ddl] DDL closed", zap.String("ID", d.uuid), zap.Duration("take time", time.Since(startTime)))
}

// GetLease implements DDL.GetLease interface.
func (d *ddl) GetLease() time.Duration {
	d.m.RLock()
	lease := d.lease
	d.m.RUnlock()
	return lease
}

// GetInfoSchemaWithInterceptor gets the infoschema bound to d. It's exported for testing.
// Please don't use this function in general; it exists so that TestParallelDDLBeforeRunDDLJob
// can intercept the call to d.infoHandle.Get(). Use d.infoHandle.Get() instead.
// Otherwise, TestParallelDDLBeforeRunDDLJob will hang forever.
func (d *ddl) GetInfoSchemaWithInterceptor(ctx sessionctx.Context) infoschema.InfoSchema {
	is := d.infoHandle.Get()

	d.mu.RLock()
	defer d.mu.RUnlock()
	return d.mu.interceptor.OnGetInfoSchema(ctx, is)
}

func (d *ddl) genGlobalIDs(count int) ([]int64, error) {
	var ret []int64
	err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
		failpoint.Inject("mockGenGlobalIDFail", func(val failpoint.Value) {
			if val.(bool) {
				failpoint.Return(errors.New("gofail genGlobalIDs error"))
			}
		})

		m := meta.NewMeta(txn)
		var err error
		ret, err = m.GenGlobalIDs(count)
		return err
	})

	return ret, err
}
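
// The failpoint above is only active when a test has enabled it. A hedged
// sketch of how a test might trigger it (the full failpoint path follows the
// usual package-path convention and is an assumption here):
//
//	_ = failpoint.Enable("github.com/pingcap/tidb/ddl/mockGenGlobalIDFail", `return(true)`)
//	defer func() { _ = failpoint.Disable("github.com/pingcap/tidb/ddl/mockGenGlobalIDFail") }()
//	_, err := d.genGlobalIDs(1) // fails with the injected error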

// SchemaSyncer implements DDL.SchemaSyncer interface.
func (d *ddl) SchemaSyncer() util.SchemaSyncer {
	return d.schemaSyncer
}

// OwnerManager implements DDL.OwnerManager interface.
func (d *ddl) OwnerManager() owner.Manager {
	return d.ownerManager
}

// GetID implements DDL.GetID interface.
func (d *ddl) GetID() string {
	return d.uuid
}

func checkJobMaxInterval(job *model.Job) time.Duration {
	// Adding an index takes longer to process, so it uses a longer polling interval.
	if job.Type == model.ActionAddIndex || job.Type == model.ActionAddPrimaryKey {
		return 3 * time.Second
	}
	if job.Type == model.ActionCreateTable || job.Type == model.ActionCreateSchema {
		return 500 * time.Millisecond
	}
	return 1 * time.Second
}
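
// These intervals feed the ticker in doDDLJob below via
// chooseLeaseTime(10*d.lease, checkJobMaxInterval(job)). A worked example,
// assuming chooseLeaseTime caps the first argument at the second and the
// schema lease is the default 45s:
//
//	10 * lease        = 450s
//	ADD INDEX job    => min(450s, 3s)    = 3s ticker
//	CREATE TABLE job => min(450s, 500ms) = 500ms ticker
//	other jobs       => min(450s, 1s)    = 1s ticker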

func (d *ddl) asyncNotifyWorker(jobTp model.ActionType) {
	// If the workers aren't running, there's no need to notify them.
	if !RunWorker {
		return
	}

	if jobTp == model.ActionAddIndex || jobTp == model.ActionAddPrimaryKey {
		asyncNotify(d.workers[addIdxWorker].ddlJobCh)
	} else {
		asyncNotify(d.workers[generalWorker].ddlJobCh)
	}
}

func (d *ddl) doDDLJob(ctx sessionctx.Context, job *model.Job) error {
	// Get a global job ID and put the DDL job in the queue.
	job.Query, _ = ctx.Value(sessionctx.QueryString).(string)
	task := &limitJobTask{job, make(chan error)}
	d.limitJobCh <- task
	err := <-task.err
	if err != nil {
		return errors.Trace(err)
	}
	ctx.GetSessionVars().StmtCtx.IsDDLJobInQueue = true

	// Notify the worker that we pushed a new job, then wait for the job to finish.
	d.asyncNotifyWorker(job.Type)
	logutil.BgLogger().Info("[ddl] start DDL job", zap.String("job", job.String()), zap.String("query", job.Query))

	var historyJob *model.Job
	jobID := job.ID
	// From start to finish, a job's state goes none -> delete only -> write only -> reorganization -> public.
	// Each state change waits at most 2 * lease, so the ticker check here would be 10 * lease.
	// But etcd is used to speed this up (normally it takes less than 0.5s now), so the interval is
	// capped at 0.5s, 1s, or 3s depending on the job type.
	ticker := time.NewTicker(chooseLeaseTime(10*d.lease, checkJobMaxInterval(job)))
	startTime := time.Now()
	metrics.JobsGauge.WithLabelValues(job.Type.String()).Inc()
	defer func() {
		ticker.Stop()
		metrics.JobsGauge.WithLabelValues(job.Type.String()).Dec()
		metrics.HandleJobHistogram.WithLabelValues(job.Type.String(), metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
	}()
	for {
		select {
		case <-d.ddlJobDoneCh:
		case <-ticker.C:
		}

		historyJob, err = d.getHistoryDDLJob(jobID)
		if err != nil {
			logutil.BgLogger().Error("[ddl] get history DDL job failed, check again", zap.Error(err))
			continue
		} else if historyJob == nil {
			logutil.BgLogger().Debug("[ddl] DDL job is not in history, maybe not run", zap.Int64("jobID", jobID))
			continue
		}

		// If a job is a history job, its state must be JobStateSynced, JobStateRollbackDone or JobStateCancelled.
		if historyJob.IsSynced() {
			logutil.BgLogger().Info("[ddl] DDL job is finished", zap.Int64("jobID", jobID))
			return nil
		}

		if historyJob.Error != nil {
			return errors.Trace(historyJob.Error)
		}
		panic("When the state is JobStateRollbackDone or JobStateCancelled, historyJob.Error should never be nil")
	}
}
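
// doDDLJob is the funnel that every DDL API function goes through. A hedged
// sketch of the typical call pattern (the job fields and argument layout shown
// here are illustrative; the real layouts live in the per-statement API
// functions, not in this file):
//
//	job := &model.Job{
//		SchemaID:   schemaID,
//		Type:       model.ActionCreateSchema,
//		BinlogInfo: &model.HistoryInfo{},
//		Args:       []interface{}{dbInfo},
//	}
//	err := d.doDDLJob(ctx, job)
//	err = d.callHookOnChanged(err) // let test hooks observe the result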

func (d *ddl) callHookOnChanged(err error) error {
	d.mu.RLock()
	defer d.mu.RUnlock()

	err = d.mu.hook.OnChanged(err)
	return errors.Trace(err)
}

// SetBinlogClient implements DDL.SetBinlogClient interface.
func (d *ddl) SetBinlogClient(binlogCli *pumpcli.PumpsClient) {
	d.binlogCli = binlogCli
}

// GetHook implements DDL.GetHook interface.
func (d *ddl) GetHook() Callback {
	d.mu.Lock()
	defer d.mu.Unlock()

	return d.mu.hook
}

func (d *ddl) startCleanDeadTableLock() {
	defer func() {
		goutil.Recover(metrics.LabelDDL, "startCleanDeadTableLock", nil, false)
		d.wg.Done()
	}()

	ticker := time.NewTicker(time.Second * 10)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if !d.ownerManager.IsOwner() {
				continue
			}
			if d.infoHandle == nil || !d.infoHandle.IsValid() {
				continue
			}
			deadLockTables, err := d.tableLockCkr.GetDeadLockedTables(d.quitCh, d.infoHandle.Get().AllSchemas())
			if err != nil {
				logutil.BgLogger().Info("[ddl] get dead table lock failed.", zap.Error(err))
				continue
			}
			for se, tables := range deadLockTables {
				err := d.CleanDeadTableLock(tables, se)
				if err != nil {
					logutil.BgLogger().Info("[ddl] clean dead table lock failed.", zap.Error(err))
				}
			}
		case <-d.quitCh:
			return
		}
	}
}

// RecoverInfo contains information needed by DDL.RecoverTable.
type RecoverInfo struct {
	SchemaID      int64
	TableInfo     *model.TableInfo
	DropJobID     int64
	SnapshotTS    uint64
	CurAutoIncID  int64
	CurAutoRandID int64
}
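
// A hedged sketch of how a caller of RecoverTable would fill this struct in.
// droppedJob, tblInfo, and the auto-ID values are placeholders standing in for
// data read from the DDL history job and the old table's metadata; none of
// them are defined in this file.
//
//	info := &RecoverInfo{
//		SchemaID:      schema.ID,
//		TableInfo:     tblInfo,
//		DropJobID:     droppedJob.ID,
//		SnapshotTS:    snapshotTS,
//		CurAutoIncID:  autoIncID,
//		CurAutoRandID: autoRandID,
//	}
//	err := d.RecoverTable(ctx, info)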