backup_storage.go 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404
  1. // Copyright 2019 Yunion
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package models
  15. import (
  16. "context"
  17. "net/url"
  18. "time"
  19. "yunion.io/x/jsonutils"
  20. "yunion.io/x/log"
  21. "yunion.io/x/pkg/errors"
  22. "yunion.io/x/pkg/utils"
  23. "yunion.io/x/sqlchemy"
  24. api "yunion.io/x/onecloud/pkg/apis/compute"
  25. "yunion.io/x/onecloud/pkg/cloudcommon/db"
  26. "yunion.io/x/onecloud/pkg/cloudcommon/db/taskman"
  27. "yunion.io/x/onecloud/pkg/hostman/storageman/backupstorage"
  28. _ "yunion.io/x/onecloud/pkg/hostman/storageman/backupstorage/nfs"
  29. _ "yunion.io/x/onecloud/pkg/hostman/storageman/backupstorage/object"
  30. "yunion.io/x/onecloud/pkg/httperrors"
  31. "yunion.io/x/onecloud/pkg/mcclient"
  32. "yunion.io/x/onecloud/pkg/util/stringutils2"
  33. )
// SBackupStorageManager is the db model manager for SBackupStorage records.
// +onecloud:swagger-gen-model-singular=backupstorage
// +onecloud:swagger-gen-model-plural=backupstorages
type SBackupStorageManager struct {
	db.SEnabledStatusInfrasResourceBaseManager
}
// SBackupStorage is a storage backend (NFS or object storage) that holds
// disk backups.
type SBackupStorage struct {
	db.SEnabledStatusInfrasResourceBase

	// AccessInfo holds the type-specific connection parameters; for object
	// storage the secret is stored AES-encrypted (see saveObjectSecret).
	AccessInfo *api.SBackupStorageAccessInfo
	// StorageType selects the backend driver (nfs or object storage).
	StorageType api.TBackupStorageType `width:"32" charset:"ascii" nullable:"false" list:"user" create:"domain_required"`
	// CapacityMb is the declared capacity of the storage in MiB.
	CapacityMb int `nullable:"false" list:"user" update:"domain" create:"domain_optional"`
}
// BackupStorageManager is the package-level singleton manager, initialized in init().
var BackupStorageManager *SBackupStorageManager
  46. func init() {
  47. BackupStorageManager = &SBackupStorageManager{
  48. SEnabledStatusInfrasResourceBaseManager: db.NewEnabledStatusInfrasResourceBaseManager(
  49. SBackupStorage{},
  50. "backupstorages_tbl",
  51. "backupstorage",
  52. "backupstorages",
  53. ),
  54. }
  55. BackupStorageManager.SetVirtualObject(BackupStorageManager)
  56. }
  57. func (bs *SBackupStorageManager) ValidateCreateData(ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, query jsonutils.JSONObject, input api.BackupStorageCreateInput) (api.BackupStorageCreateInput, error) {
  58. var err error
  59. input.EnabledStatusInfrasResourceBaseCreateInput, err = bs.SEnabledStatusInfrasResourceBaseManager.ValidateCreateData(ctx, userCred, ownerId, query, input.EnabledStatusInfrasResourceBaseCreateInput)
  60. if err != nil {
  61. return input, err
  62. }
  63. if !utils.IsInArray(input.StorageType, []string{string(api.BACKUPSTORAGE_TYPE_NFS), string(api.BACKUPSTORAGE_TYPE_OBJECT_STORAGE)}) {
  64. return input, httperrors.NewInputParameterError("Invalid storage type %s", input.StorageType)
  65. }
  66. switch input.StorageType {
  67. case string(api.BACKUPSTORAGE_TYPE_NFS):
  68. if input.NfsHost == "" {
  69. return input, httperrors.NewInputParameterError("nfs_host is required when storage type is nfs")
  70. }
  71. if input.NfsSharedDir == "" {
  72. return input, httperrors.NewInputParameterError("nfs_shared_dir is required when storage type is nfs")
  73. }
  74. case string(api.BACKUPSTORAGE_TYPE_OBJECT_STORAGE):
  75. if input.ObjectBucketUrl == "" {
  76. return input, httperrors.NewInputParameterError("object_bucket_url is required when storage type is object")
  77. }
  78. _, err := url.Parse(input.ObjectBucketUrl)
  79. if err != nil {
  80. return input, httperrors.NewInputParameterError("invalid object_bucket_url %s: %s", input.ObjectBucketUrl, err)
  81. }
  82. if input.ObjectAccessKey == "" {
  83. return input, httperrors.NewInputParameterError("object_access_key is required when storage type is object")
  84. }
  85. if input.ObjectSecret == "" {
  86. return input, httperrors.NewInputParameterError("object_secret is required when storage type is object")
  87. }
  88. if len(input.ObjectBucketUrlExt) > 0 {
  89. _, err := url.Parse(input.ObjectBucketUrlExt)
  90. if err != nil {
  91. return input, httperrors.NewInputParameterError("invalid object_bucket_url_ext %s: %s", input.ObjectBucketUrlExt, err)
  92. }
  93. }
  94. }
  95. return input, nil
  96. }
  97. func (bs *SBackupStorage) CustomizeCreate(ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, query jsonutils.JSONObject, data jsonutils.JSONObject) error {
  98. bs.SetEnabled(true)
  99. input := api.BackupStorageCreateInput{}
  100. err := data.Unmarshal(&input)
  101. if err != nil {
  102. return errors.Wrap(err, "Unmarshal BackupStorageCreateInput")
  103. }
  104. // nfsHost, _ := data.GetString("nfs_host")
  105. // nfsSharedDir, _ := data.GetString("nfs_shared_dir")
  106. bs.Status = api.BACKUPSTORAGE_STATUS_ONLINE
  107. bs.AccessInfo = &api.SBackupStorageAccessInfo{
  108. NfsHost: input.NfsHost,
  109. NfsSharedDir: input.NfsSharedDir,
  110. ObjectBucketUrl: input.ObjectBucketUrl,
  111. ObjectAccessKey: input.ObjectAccessKey,
  112. ObjectSecret: input.ObjectSecret,
  113. ObjectSignVer: input.ObjectSignVer,
  114. ObjectBucketUrlExt: input.ObjectBucketUrlExt,
  115. }
  116. return bs.SEnabledStatusInfrasResourceBase.CustomizeCreate(ctx, userCred, ownerId, query, data)
  117. }
  118. func (bs *SBackupStorage) BackupCount() (int, error) {
  119. return DiskBackupManager.Query().Equals("backup_storage_id", bs.GetId()).CountWithError()
  120. }
  121. func (bs *SBackupStorage) ValidateDeleteCondition(ctx context.Context, info jsonutils.JSONObject) error {
  122. cnt, err := bs.BackupCount()
  123. if err != nil {
  124. return httperrors.NewInternalServerError("BackupCount fail %s", err)
  125. }
  126. if cnt > 0 {
  127. return httperrors.NewNotEmptyError("storage has been used")
  128. }
  129. return bs.SEnabledStatusInfrasResourceBase.ValidateDeleteCondition(ctx, nil)
  130. }
  131. func (bs *SBackupStorage) PostCreate(ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, query jsonutils.JSONObject, data jsonutils.JSONObject) {
  132. bs.SEnabledStatusInfrasResourceBase.PostCreate(ctx, userCred, ownerId, query, data)
  133. bs.SetStatus(ctx, userCred, api.BACKUPSTORAGE_STATUS_OFFLINE, "")
  134. if bs.StorageType == api.BACKUPSTORAGE_TYPE_OBJECT_STORAGE {
  135. err := bs.saveObjectSecret(bs.AccessInfo.ObjectSecret)
  136. if err != nil {
  137. log.Errorf("convert object secret fail %s", err)
  138. }
  139. }
  140. err := bs.startSyncStatusTask(ctx, userCred, "")
  141. if err != nil {
  142. log.Errorf("unable to sync backup storage status")
  143. }
  144. }
// startSyncStatusTask schedules a BackupStorageSyncstatusTask for this
// storage; parentTaskId may be empty for a standalone task.
func (bs *SBackupStorage) startSyncStatusTask(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string) error {
	return StartResourceSyncStatusTask(ctx, userCred, bs, "BackupStorageSyncstatusTask", parentTaskId)
}
  148. func (bs *SBackupStorage) saveObjectSecret(secret string) error {
  149. sec, err := utils.EncryptAESBase64(bs.Id, secret)
  150. if err != nil {
  151. return errors.Wrap(err, "EncryptAESBase64")
  152. }
  153. accessInfo := *bs.AccessInfo
  154. accessInfo.ObjectSecret = sec
  155. _, err = db.Update(bs, func() error {
  156. bs.AccessInfo = &accessInfo
  157. return nil
  158. })
  159. return errors.Wrap(err, "Update")
  160. }
  161. func (bs *SBackupStorage) getMoreDetails(ctx context.Context, out api.BackupStorageDetails) api.BackupStorageDetails {
  162. out.NfsHost = bs.AccessInfo.NfsHost
  163. out.NfsSharedDir = bs.AccessInfo.NfsSharedDir
  164. out.ObjectBucketUrl = bs.AccessInfo.ObjectBucketUrl
  165. out.ObjectAccessKey = bs.AccessInfo.ObjectAccessKey
  166. out.ObjectSignVer = bs.AccessInfo.ObjectSignVer
  167. out.ObjectBucketUrlExt = bs.AccessInfo.ObjectBucketUrlExt
  168. // should not return secret
  169. out.ObjectSecret = "" // bs.AccessInfo.ObjectSecret
  170. return out
  171. }
  172. func (bm *SBackupStorageManager) FetchCustomizeColumns(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, objs []interface{}, fields stringutils2.SSortedStrings, isList bool) []api.BackupStorageDetails {
  173. rows := make([]api.BackupStorageDetails, len(objs))
  174. esiRows := bm.SEnabledStatusInfrasResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList)
  175. for i := range rows {
  176. rows[i].EnabledStatusInfrasResourceBaseDetails = esiRows[i]
  177. bs := objs[i].(*SBackupStorage)
  178. rows[i] = bs.getMoreDetails(ctx, rows[i])
  179. }
  180. return rows
  181. }
  182. func (self *SBackupStorage) GetRegionDriver() IRegionDriver {
  183. return GetRegionDriver(api.CLOUD_PROVIDER_ONECLOUD)
  184. }
  185. func (bm *SBackupStorageManager) ListItemFilter(ctx context.Context, q *sqlchemy.SQuery, userCred mcclient.TokenCredential, input api.BackupStorageListInput) (*sqlchemy.SQuery, error) {
  186. var err error
  187. q, err = bm.SEnabledStatusInfrasResourceBaseManager.ListItemFilter(ctx, q, userCred, input.EnabledStatusInfrasResourceBaseListInput)
  188. if err != nil {
  189. return nil, errors.Wrap(err, "SEnabledStatusInfrasResourceBaseManager.ListItemFilter")
  190. }
  191. if len(input.ServerId) > 0 {
  192. serverObj, err := GuestManager.FetchByIdOrName(ctx, userCred, input.ServerId)
  193. if err != nil {
  194. if errors.Cause(err) == errors.ErrNotFound {
  195. return nil, httperrors.NewResourceNotFoundError2(GuestManager.Keyword(), input.ServerId)
  196. } else {
  197. return nil, errors.Wrap(err, "GuestManager.FetchByIdOrName")
  198. }
  199. }
  200. server := serverObj.(*SGuest)
  201. input.ServerId = server.Id
  202. hostIds, err := server.getDisksCandidateHostIds()
  203. if err != nil {
  204. return nil, errors.Wrap(err, "getDisksCandidateHostIds")
  205. }
  206. q = bm.filterByCandidateHostIds(q, hostIds)
  207. }
  208. if len(input.DiskId) > 0 {
  209. diskObj, err := DiskManager.FetchByIdOrName(ctx, userCred, input.DiskId)
  210. if err != nil {
  211. if errors.Cause(err) == errors.ErrNotFound {
  212. return nil, httperrors.NewResourceNotFoundError2(DiskManager.Keyword(), input.DiskId)
  213. } else {
  214. return nil, errors.Wrap(err, "DiskManager.FetchByIdOrName")
  215. }
  216. }
  217. disk := diskObj.(*SDisk)
  218. input.DiskId = disk.Id
  219. hostIds, err := disk.getCandidateHostIds()
  220. if err != nil {
  221. return nil, errors.Wrap(err, "getDisksCandidateHostIds")
  222. }
  223. q = bm.filterByCandidateHostIds(q, hostIds)
  224. }
  225. return q, nil
  226. }
  227. func (bm *SBackupStorageManager) filterByCandidateHostIds(q *sqlchemy.SQuery, candidateIds []string) *sqlchemy.SQuery {
  228. hbsSubQ := HostBackupstorageManager.Query().SubQuery()
  229. q = q.LeftJoin(hbsSubQ, sqlchemy.Equals(q.Field("id"), hbsSubQ.Field("backupstorage_id")))
  230. q = q.Filter(sqlchemy.OR(
  231. sqlchemy.IsNull(hbsSubQ.Field("host_id")),
  232. sqlchemy.In(hbsSubQ.Field("host_id"), candidateIds),
  233. ))
  234. return q
  235. }
  236. func (bs *SBackupStorage) PerformSyncstatus(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.DiskBackupSyncstatusInput) (jsonutils.JSONObject, error) {
  237. var openTask = true
  238. count, err := taskman.TaskManager.QueryTasksOfObject(bs, time.Now().Add(-3*time.Minute), &openTask).CountWithError()
  239. if err != nil {
  240. return nil, err
  241. }
  242. if count > 0 {
  243. return nil, httperrors.NewBadRequestError("Backup has %d task active, can't sync status", count)
  244. }
  245. return nil, bs.startSyncStatusTask(ctx, userCred, "")
  246. }
  247. func (bs *SBackupStorage) ValidateUpdateData(
  248. ctx context.Context,
  249. userCred mcclient.TokenCredential,
  250. query jsonutils.JSONObject,
  251. input api.BackupStorageUpdateInput,
  252. ) (api.BackupStorageUpdateInput, error) {
  253. var err error
  254. if len(input.Name) > 0 {
  255. err := isValidBucketName(input.Name)
  256. if err != nil {
  257. return input, httperrors.NewInputParameterError("invalid bucket name(%s): %s", input.Name, err)
  258. }
  259. }
  260. input.EnabledStatusInfrasResourceBaseUpdateInput, err = bs.SEnabledStatusInfrasResourceBase.ValidateUpdateData(ctx, userCred, query, input.EnabledStatusInfrasResourceBaseUpdateInput)
  261. if err != nil {
  262. return input, errors.Wrap(err, "SSharableVirtualResourceBase.ValidateUpdateData")
  263. }
  264. return input, nil
  265. }
// PostUpdate runs after a successful update. It persists any changed
// access info fields into the AccessInfo column and, on change, re-syncs
// the storage status. Errors here are only logged: the base update has
// already been committed.
func (bs *SBackupStorage) PostUpdate(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) {
	bs.SEnabledStatusInfrasResourceBase.PostUpdate(ctx, userCred, query, data)
	input := api.BackupStorageUpdateInput{}
	err := data.Unmarshal(&input)
	if err != nil {
		log.Errorf("SBackupStorage.PostUpdate Unmarshal data %s fail %s", data, err)
		return
	}
	// update accessinfo
	// Work on a copy so the row is only written when something changed.
	accessInfoChanged := false
	accessInfo := *bs.AccessInfo
	switch bs.StorageType {
	case api.BACKUPSTORAGE_TYPE_NFS:
		if len(input.NfsHost) > 0 {
			accessInfo.NfsHost = input.NfsHost
			accessInfoChanged = true
		}
		if len(input.NfsSharedDir) > 0 {
			accessInfo.NfsSharedDir = input.NfsSharedDir
			accessInfoChanged = true
		}
	case api.BACKUPSTORAGE_TYPE_OBJECT_STORAGE:
		if len(input.ObjectBucketUrl) > 0 {
			accessInfo.ObjectBucketUrl = input.ObjectBucketUrl
			accessInfoChanged = true
		}
		if len(input.ObjectAccessKey) > 0 {
			accessInfo.ObjectAccessKey = input.ObjectAccessKey
			accessInfoChanged = true
		}
		if len(input.ObjectSecret) > 0 {
			// Secrets are persisted encrypted with the storage id as key.
			sec, err := utils.EncryptAESBase64(bs.Id, input.ObjectSecret)
			if err != nil {
				log.Errorf("EncryptAESBase64 fail %s", err)
				return
			}
			accessInfo.ObjectSecret = sec
			accessInfoChanged = true
		}
		// NOTE(review): unlike the length-guarded fields above, ObjectSignVer
		// is compared directly, so an absent value in the request (zero value)
		// would reset a previously configured sign version — confirm intended.
		if input.ObjectSignVer != accessInfo.ObjectSignVer {
			accessInfo.ObjectSignVer = input.ObjectSignVer
			accessInfoChanged = true
		}
		if len(input.ObjectBucketUrlExt) > 0 {
			accessInfo.ObjectBucketUrlExt = input.ObjectBucketUrlExt
			accessInfoChanged = true
		}
	}
	if accessInfoChanged {
		_, err = db.Update(bs, func() error {
			bs.AccessInfo = &accessInfo
			return nil
		})
		if err != nil {
			log.Errorf("update fail %s", err)
		} else {
			// Access parameters changed: re-check connectivity.
			err := StartResourceSyncStatusTask(ctx, userCred, bs, "BackupStorageSyncstatusTask", "")
			if err != nil {
				log.Errorf("unable to sync backup storage status")
			}
		}
	}
}
  329. func (bs *SBackupStorage) GetAccessInfo() (*api.SBackupStorageAccessInfo, error) {
  330. accessInfo := *bs.AccessInfo
  331. switch bs.StorageType {
  332. case api.BACKUPSTORAGE_TYPE_OBJECT_STORAGE:
  333. secret, err := utils.DescryptAESBase64(bs.Id, accessInfo.ObjectSecret)
  334. if err != nil {
  335. return nil, errors.Wrap(err, "DescryptAESBase64")
  336. }
  337. accessInfo.ObjectSecret = secret
  338. }
  339. return &accessInfo, nil
  340. }
  341. func (bs *SBackupStorage) Delete(ctx context.Context, userCred mcclient.TokenCredential) error {
  342. log.Infof("Host delete do nothing")
  343. // cleanup hostbackupstorage
  344. hbs, err := HostBackupstorageManager.GetBackupStoragesByBackup(bs.Id)
  345. if err != nil {
  346. return errors.Wrap(err, "GetBackupStoragesByBackup")
  347. }
  348. var errs []error
  349. for i := range hbs {
  350. err := hbs[i].Detach(ctx, userCred)
  351. if err != nil {
  352. errs = append(errs, errors.Wrapf(err, "Detach %s %s", hbs[i].HostId, hbs[i].BackupstorageId))
  353. }
  354. }
  355. if len(errs) > 0 {
  356. return errors.NewAggregate(errs)
  357. }
  358. return bs.SEnabledStatusInfrasResourceBase.Delete(ctx, userCred)
  359. }
  360. func (bs *SBackupStorage) GetIBackupStorage() (backupstorage.IBackupStorage, error) {
  361. accessInfo, err := bs.GetAccessInfo()
  362. if err != nil {
  363. return nil, errors.Wrap(err, "GetAccessInfo")
  364. }
  365. log.Infof("GetIBackupStorage %s %s", bs.Id, accessInfo.String())
  366. ibs, err := backupstorage.GetBackupStorage(bs.Id, jsonutils.Marshal(accessInfo).(*jsonutils.JSONDict))
  367. if err != nil {
  368. return nil, errors.Wrap(err, "GetBackupStorage")
  369. }
  370. return ibs, nil
  371. }