We are migrating all of our databases from on-premises to Amazon Aurora. Our database is about 136 GB, and each table holds a few million rows. After the full load completes, only about 200,000 to 300,000 of those millions of rows have actually been migrated per table. We are new to DMS, so we don't know where the rows are being dropped. Does anyone know how we can migrate the exact number of rows? AWS DMS is not giving us a 100% migration.

Migration type: full load
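
To see where rows are going missing, one starting point is to compare the per-table counters that DMS itself records against counts taken on the source. A minimal sketch using boto3 (the task ARN below is a placeholder for your own task's ARN):

import boto3

# Placeholder ARN -- substitute the ARN of the DMS task in question.
TASK_ARN = "arn:aws:dms:us-east-1:123456789012:task:EXAMPLE"

dms = boto3.client("dms")

# DescribeTableStatistics reports, per table, how many rows the full load
# copied, how many rows errored, and the table's current state.
paginator = dms.get_paginator("describe_table_statistics")
for page in paginator.paginate(ReplicationTaskArn=TASK_ARN):
    for stats in page["TableStatistics"]:
        print(
            f'{stats["SchemaName"]}.{stats["TableName"]}: '
            f'loaded={stats["FullLoadRows"]} '
            f'errors={stats["FullLoadErrorRows"]} '
            f'state={stats["TableState"]}'
        )

Tables whose state is not "Table completed", or whose error count is non-zero, are the ones to chase in the task logs.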

These are our AWS DMS task settings:

{
    "TargetMetadata": {
        "TargetSchema": "",
        "SupportLobs": true,
        "FullLobMode": true,
        "LobChunkSize": 64,
        "LimitedSizeLobMode": false,
        "LobMaxSize": 0,
        "LoadMaxFileSize": 0,
        "ParallelLoadThreads": 0,
        "BatchApplyEnabled": false
    },
    "FullLoadSettings": {
        "FullLoadEnabled": true,
        "ApplyChangesEnabled": false,
        "TargetTablePrepMode": "TRUNCATE_BEFORE_LOAD",
        "CreatePkAfterFullLoad": false,
        "StopTaskCachedChangesApplied": false,
        "StopTaskCachedChangesNotApplied": false,
        "ResumeEnabled": false,
        "ResumeMinTableSize": 100000,
        "ResumeOnlyClusteredPKTables": true,
        "MaxFullLoadSubTasks": 15,
        "TransactionConsistencyTimeout": 600,
        "CommitRate": 10000
    },
    "Logging": {
        "EnableLogging": true,
        "LogComponents": [
            {
                "Id": "SOURCE_UNLOAD",
                "Severity": "LOGGER_SEVERITY_DEFAULT"
            },
            {
                "Id": "SOURCE_CAPTURE",
                "Severity": "LOGGER_SEVERITY_DEFAULT"
            },
            {
                "Id": "TARGET_LOAD",
                "Severity": "LOGGER_SEVERITY_DEFAULT"
            },
            {
                "Id": "TARGET_APPLY",
                "Severity": "LOGGER_SEVERITY_DEFAULT"
            },
            {
                "Id": "TASK_MANAGER",
                "Severity": "LOGGER_SEVERITY_DEFAULT"
            }
        ],
        "CloudWatchLogGroup": "dms-tasks-krishna-smartdata",
        "CloudWatchLogStream": "dms-task-UERQWLR6AYHYIEKMR3HN2VL7T4"
    },
    "ControlTablesSettings": {
        "historyTimeslotInMinutes": 5,
        "ControlSchema": "",
        "HistoryTimeslotInMinutes": 5,
        "HistoryTableEnabled": true,
        "SuspendedTablesTableEnabled": true,
        "StatusTableEnabled": true
    },
    "StreamBufferSettings": {
        "StreamBufferCount": 3,
        "StreamBufferSizeInMB": 8,
        "CtrlStreamBufferSizeInMB": 5
    },
    "ChangeProcessingDdlHandlingPolicy": {
        "HandleSourceTableDropped": true,
        "HandleSourceTableTruncated": true,
        "HandleSourceTableAltered": true
    },
    "ErrorBehavior": {
        "DataErrorPolicy": "LOG_ERROR",
        "DataTruncationErrorPolicy": "LOG_ERROR",
        "DataErrorEscalationPolicy": "SUSPEND_TABLE",
        "DataErrorEscalationCount": 0,
        "TableErrorPolicy": "SUSPEND_TABLE",
        "TableErrorEscalationPolicy": "STOP_TASK",
        "TableErrorEscalationCount": 0,
        "RecoverableErrorCount": -1,
        "RecoverableErrorInterval": 5,
        "RecoverableErrorThrottling": true,
        "RecoverableErrorThrottlingMax": 1800,
        "ApplyErrorDeletePolicy": "IGNORE_RECORD",
        "ApplyErrorInsertPolicy": "LOG_ERROR",
        "ApplyErrorUpdatePolicy": "LOG_ERROR",
        "ApplyErrorEscalationPolicy": "LOG_ERROR",
        "ApplyErrorEscalationCount": 0,
        "FullLoadIgnoreConflicts": true
    },
    "ChangeProcessingTuning": {
        "BatchApplyPreserveTransaction": true,
        "BatchApplyTimeoutMin": 1,
        "BatchApplyTimeoutMax": 30,
        "BatchApplyMemoryLimit": 500,
        "BatchSplitSize": 0,
        "MinTransactionSize": 1000,
        "CommitTimeout": 1,
        "MemoryLimitTotal": 1024,
        "MemoryKeepTime": 60,
        "StatementCacheSize": 50
    }
}

Table mappings:

{
    "rules": [
        {
            "rule-type": "selection",
            "rule-id": "1",
            "rule-name": "1",
            "object-locator": {
                "schema-name": "dbo",
                "table-name": "%"
            },
            "rule-action": "include"
        },
        {
            "rule-type": "transformation",
            "rule-id": "2",
            "rule-name": "2",
            "rule-target": "schema",
            "object-locator": {
                "schema-name": "dbo"
            },
            "rule-action": "rename",
            "value": "smartdata_int"
        }
    ]
}

Answer

You should enable the CloudWatch logging option for every DMS task. Have you checked this task's logs? Do you have varchar/text columns larger than 32 KB? When migrating to a target such as Redshift, those values get truncated, so be aware that this counts toward your error count.
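
One way to scan those CloudWatch logs for truncation or suspension messages is sketched below with boto3. The log group and stream names are taken from the task settings in the question; the keyword list is an assumption to adjust, since the exact wording of DMS messages varies:

import boto3

logs = boto3.client("logs")

# Log group/stream names come from the task settings shown in the question.
paginator = logs.get_paginator("filter_log_events")
pages = paginator.paginate(
    logGroupName="dms-tasks-krishna-smartdata",
    logStreamNames=["dms-task-UERQWLR6AYHYIEKMR3HN2VL7T4"],
)
for page in pages:
    for event in page["events"]:
        message = event["message"]
        # Keyword list is a guess at typical DMS warning wording; adjust as needed.
        if any(word in message.lower() for word in ("truncat", "suspend", "error")):
            print(message)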

Yes, there is a column that gets truncated during the migration. I even tried increasing the size to 64 KB, but it still fails.
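
If truncation is the culprit, it can help to measure how large the LOB values actually are before choosing a size setting. A minimal sketch, assuming a SQL Server source (the "dbo" schema suggests one; the connection string, table, and column names are placeholders):

import pyodbc

# Placeholder connection string -- substitute your source server's details.
conn = pyodbc.connect(
    "DRIVER={ODBC Driver 17 for SQL Server};"
    "SERVER=source-host;DATABASE=your_db;UID=user;PWD=password"
)
cursor = conn.cursor()

# DATALENGTH returns the stored size in bytes; dbo.some_table and lob_column
# are hypothetical names -- repeat for each table with large varchar/text columns.
cursor.execute("SELECT MAX(DATALENGTH(lob_column)) FROM dbo.some_table")
max_bytes = cursor.fetchone()[0] or 0
print(f"largest LOB value: {max_bytes} bytes ({max_bytes / 1024:.1f} KB)")

Note that with "FullLobMode": true, as in the settings above, DMS moves each LOB piecewise in chunks of "LobChunkSize" and "LobMaxSize" is not applied; "LobMaxSize" only truncates in limited LOB mode, so it matters which of the two was raised to 64 KB.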