
The SLUB Memory Allocator in the 4.19 Kernel

SLUB memory allocation

Initialization

Most of the kernel's own management structures get their memory from kmalloc, which creates a chicken-and-egg problem for the slab structures themselves: they are far smaller than a whole page, which makes them ideal kmalloc candidates, yet kmalloc only works after the slab allocator has been initialized. For this reason kmem_cache_init statically defines two kmem_cache structures:

kmem_cache & kmem_cache_node。

kmem_cache is the cache that allocates struct kmem_cache itself; kmem_cache_node is used to allocate kmem_cache->node. kmem_cache_init first brings up the kmem_cache_node cache, but creating that cache also requires allocating its own node structures, so there is again a chicken-and-egg problem. It is resolved through the slab_state enum: while slab_state is still DOWN, the very first node structures are allocated with early_kmem_cache_node_alloc() directly from the buddy allocator (see the slab_state sketch and the call tree below).

Reference: Linux 内存管理之 SLUB分配器(5):slub初始化过程_And乔的博客-CSDN博客
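
For orientation, boot progress of the slab allocator is tracked by the slab_state enum from mm/slab.h; the comments below are paraphrased, but the states themselves match the 4.19 sources. init_kmem_cache_nodes() (shown later) checks it to decide whether early_kmem_cache_node_alloc() must be used:

    enum slab_state {
        DOWN,           /* no slab functionality available yet */
        PARTIAL,        /* SLUB: kmem_cache_node is usable */
        PARTIAL_NODE,   /* SLAB only: kmalloc size for the node struct is usable */
        UP,             /* slab caches, including the kmalloc array, are usable */
        FULL,           /* everything, including sysfs support, is up */
    };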

Initialize the statically defined kmem_cache structures:

    create_boot_cache                       // static struct kmem_cache boot_kmem_cache, boot_kmem_cache_node
        __kmem_cache_create
            kmem_cache_open
                init_kmem_cache_nodes       // allocate the node structures, using boot_kmem_cache_node
                    1: early_kmem_cache_node_alloc  // boot_kmem_cache_node is not ready yet, so its own node
                                                    // structures come from this helper, straight from the buddy allocator
                    2: kmem_cache_alloc_node        // every other node structure is allocated from boot_kmem_cache_node
                        slab_alloc_node             // touches the node, which therefore has to exist already
                alloc_kmem_cache_cpus
            sysfs_slab_add

Create the other fixed-size slubs used by kmalloc:

    create_kmalloc_caches
        new_kmalloc_cache
            create_kmalloc_cache
                kmem_cache_zalloc
                create_boot_cache

Other, dynamically sized slubs:

    kmem_cache_create
        create_cache
            __kmem_cache_create

 

Call path: start_kernel -> mm_init -> kmem_cache_init

    void __init kmem_cache_init(void)
    {
        static __initdata struct kmem_cache boot_kmem_cache,
            boot_kmem_cache_node;

        if (debug_guardpage_minorder())
            slub_max_order = 0;

        kmem_cache_node = &boot_kmem_cache_node;
        kmem_cache = &boot_kmem_cache;

        create_boot_cache(kmem_cache_node, "kmem_cache_node",
            sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);

        register_hotmemory_notifier(&slab_memory_callback_nb);

        /* Able to allocate the per node structures */
        slab_state = PARTIAL;

        create_boot_cache(kmem_cache, "kmem_cache",
            offsetof(struct kmem_cache, node) +
                nr_node_ids * sizeof(struct kmem_cache_node *),
            SLAB_HWCACHE_ALIGN, 0, 0);

        /* bootstrap() copies the static boot caches into objects allocated from kmem_cache itself */
        kmem_cache = bootstrap(&boot_kmem_cache);
        kmem_cache_node = bootstrap(&boot_kmem_cache_node);

        /* Now we can use the kmem_cache to allocate kmalloc slabs */
        setup_kmalloc_cache_index_table();
        create_kmalloc_caches(0);

        /* Setup random freelists for each cache */
        init_freelist_randomization();

        cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
                      slub_cpu_dead);

        pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
            cache_line_size(),
            slub_min_order, slub_max_order, slub_min_objects,
            nr_cpu_ids, nr_node_ids);
    }

create_kmalloc_caches loops over the kmalloc size classes and creates one kmem_cache for each through new_kmalloc_cache. (Note: the array declarations below are the 4.19 layout, a flat kmalloc_caches plus a separate kmalloc_dma_caches, while the loop body also indexes kmalloc_caches by a kmalloc type, as slightly newer kernels do; the overall flow is the same.)

    struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __ro_after_init;
    EXPORT_SYMBOL(kmalloc_caches);

    #ifdef CONFIG_ZONE_DMA
    struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1] __ro_after_init;
    EXPORT_SYMBOL(kmalloc_dma_caches);
    #endif

    void __init create_kmalloc_caches(slab_flags_t flags)
    {
        int i, type;

        for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
            for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
                if (!kmalloc_caches[type][i])
                    new_kmalloc_cache(i, type, flags);  /* fill in the kmalloc_caches array */

                /*
                 * Caches that are not of the two-to-the-power-of size.
                 * These have to be created immediately after the
                 * earlier power of two caches
                 */
                if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
                        !kmalloc_caches[type][1])
                    new_kmalloc_cache(1, type, flags);
                if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
                        !kmalloc_caches[type][2])
                    new_kmalloc_cache(2, type, flags);
            }
        }

        /* Kmalloc array is now usable */
        slab_state = UP;

    #ifdef CONFIG_ZONE_DMA
        for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
            struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];

            if (s) {
                unsigned int size = kmalloc_size(i);
                const char *n = kmalloc_cache_name("dma-kmalloc", size);

                BUG_ON(!n);
                kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
                    n, size, SLAB_CACHE_DMA | flags, 0, 0);
            }
        }
    #endif
    }

new_kmalloc_cache creates the fixed-size caches the kernel prepares in advance; the names and sizes come from the kmalloc_info[] table.

    static void __init new_kmalloc_cache(int idx, slab_flags_t flags)
    {
        kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
                        kmalloc_info[idx].size, flags, 0,
                        kmalloc_info[idx].size);
    }

    /*
     * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
     * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
     * kmalloc-67108864.
     */
    const struct kmalloc_info_struct kmalloc_info[] __initconst = {
        {NULL, 0},                      {"kmalloc-96", 96},
        {"kmalloc-192", 192},           {"kmalloc-8", 8},
        {"kmalloc-16", 16},             {"kmalloc-32", 32},
        {"kmalloc-64", 64},             {"kmalloc-128", 128},
        {"kmalloc-256", 256},           {"kmalloc-512", 512},
        {"kmalloc-1024", 1024},         {"kmalloc-2048", 2048},
        {"kmalloc-4096", 4096},         {"kmalloc-8192", 8192},
        {"kmalloc-16384", 16384},       {"kmalloc-32768", 32768},
        {"kmalloc-65536", 65536},       {"kmalloc-131072", 131072},
        {"kmalloc-262144", 262144},     {"kmalloc-524288", 524288},
        {"kmalloc-1048576", 1048576},   {"kmalloc-2097152", 2097152},
        {"kmalloc-4194304", 4194304},   {"kmalloc-8388608", 8388608},
        {"kmalloc-16777216", 16777216}, {"kmalloc-33554432", 33554432},
        {"kmalloc-67108864", 67108864}
    };

kmem_cache_create

The kernel creates a slab cache through the kmem_cache_create() interface.

It takes five parameters; a minimal usage sketch follows the list.

name: name of the slab cache to create
size: size of one object
align: alignment of the objects
flags: mask and flag bits for the slab allocator; for example the common SLAB_HWCACHE_ALIGN flag requests that the objects managed by the kmem_cache be aligned to the hardware cache line
ctor: constructor for the objects
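
Here is a minimal usage sketch from a hypothetical module; struct foo, foo_cache and the init/exit function names are invented for illustration:

    #include <linux/module.h>
    #include <linux/slab.h>

    /* hypothetical object type managed by its own cache */
    struct foo {
        int id;
        char payload[100];
    };

    static struct kmem_cache *foo_cache;

    static int __init foo_init(void)
    {
        struct foo *f;

        /* no constructor; objects aligned to the hardware cache line */
        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
                                      0, SLAB_HWCACHE_ALIGN, NULL);
        if (!foo_cache)
            return -ENOMEM;

        /* allocate one zeroed object from the cache and give it back */
        f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
        if (f) {
            f->id = 1;
            kmem_cache_free(foo_cache, f);
        }
        return 0;
    }

    static void __exit foo_exit(void)
    {
        kmem_cache_destroy(foo_cache);
    }

    module_init(foo_init);
    module_exit(foo_exit);
    MODULE_LICENSE("GPL");

Compared with kmalloc, which would round such an allocation up to the next kmalloc-* size class, a dedicated cache is sized for the object itself.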

    struct kmem_cache *
    kmem_cache_create(const char *name, unsigned int size, unsigned int align,
            slab_flags_t flags, void (*ctor)(void *))
    {
        return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
                          ctor);
    }

    /* abridged */
    struct kmem_cache *
    kmem_cache_create_usercopy(const char *name,
              unsigned int size, unsigned int align,
              slab_flags_t flags,
              unsigned int useroffset, unsigned int usersize,
              void (*ctor)(void *))
    {
        struct kmem_cache *s = NULL;
        const char *cache_name;
        int err;

        if (!usersize)
            s = __kmem_cache_alias(name, size, align, flags, ctor);
        if (s)
            goto out_unlock;

        cache_name = kstrdup_const(name, GFP_KERNEL);
        if (!cache_name) {
            err = -ENOMEM;
            goto out_unlock;
        }

        /* the cache is actually built by create_cache() */
        s = create_cache(cache_name, size,
                 calculate_alignment(flags, align, size),
                 flags, useroffset, usersize, ctor, NULL, NULL);
        if (IS_ERR(s)) {
            err = PTR_ERR(s);
            kfree_const(cache_name);
        }

    out_unlock:
        return s;
    }


The create_cache function

    static struct kmem_cache *create_cache(const char *name,
            unsigned int object_size, unsigned int align,
            slab_flags_t flags, unsigned int useroffset,
            unsigned int usersize, void (*ctor)(void *),
            struct mem_cgroup *memcg, struct kmem_cache *root_cache)
    {
        struct kmem_cache *s;
        int err;

        if (WARN_ON(useroffset + usersize > object_size))
            useroffset = usersize = 0;

        err = -ENOMEM;
        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);  /* kmem_cache is the boot-time cache that allocates struct kmem_cache itself */
        if (!s)
            goto out;

        s->name = name;
        s->size = s->object_size = object_size;
        s->align = align;
        s->ctor = ctor;
        s->useroffset = useroffset;
        s->usersize = usersize;

        err = init_memcg_params(s, memcg, root_cache);
        if (err)
            goto out_free_cache;

        err = __kmem_cache_create(s, flags);    /* further initialization */
        if (err)
            goto out_free_cache;

        s->refcount = 1;
        list_add(&s->list, &slab_caches);       /* add the new cache to the global slab_caches list */
        memcg_link_cache(s);
    out:
        if (err)
            return ERR_PTR(err);
        return s;

    out_free_cache:
        destroy_memcg_params(s);
        kmem_cache_free(kmem_cache, s);
        goto out;
    }

The __kmem_cache_create function

    int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
    {
        int err;

        err = kmem_cache_open(s, flags);
        if (err)
            return err;

        /* Mutex is not taken during early boot */
        if (slab_state <= UP)
            return 0;

        memcg_propagate_slab_attrs(s);
        err = sysfs_slab_add(s);
        if (err)
            __kmem_cache_release(s);

        return err;
    }

    /* abridged */
    static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
    {
        ....................
        s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
    #ifdef CONFIG_SLAB_FREELIST_HARDENED
        s->random = get_random_long();
    #endif
        if (!calculate_sizes(s, -1))
            goto error;

        if (!init_kmem_cache_nodes(s))  /* set up the cache's struct kmem_cache_node *node[MAX_NUMNODES] */
            goto error;
        if (alloc_kmem_cache_cpus(s))   /* set up the cache's struct kmem_cache_cpu __percpu *cpu_slab */
        ...........
    }

    static int init_kmem_cache_nodes(struct kmem_cache *s)
    {
        int node;

        for_each_node_state(node, N_NORMAL_MEMORY) {
            struct kmem_cache_node *n;

            if (slab_state == DOWN) {
                early_kmem_cache_node_alloc(node);
                continue;
            }
            n = kmem_cache_alloc_node(kmem_cache_node,
                        GFP_KERNEL, node);
            /*
             * The node structures are allocated from kmem_cache_node, which is a
             * chicken-and-egg problem while kmem_cache_node itself is being created.
             * That is what the slab_state enum is for: while it is still DOWN,
             * early_kmem_cache_node_alloc() is used instead.
             */
            if (!n) {
                free_kmem_cache_nodes(s);
                return 0;
            }

            init_kmem_cache_node(n);
            s->node[node] = n;
        }
        return 1;
    }



 

The kmalloc function

Starting from kmalloc, this is how the kernel allocates small blocks of memory:

    /* kmalloc (SLUB) */
    static __always_inline void *kmalloc(size_t size, gfp_t flags)
    {
        if (__builtin_constant_p(size)) {
    #ifndef CONFIG_SLOB
            unsigned int index;
    #endif
            if (size > KMALLOC_MAX_CACHE_SIZE)
                return kmalloc_large(size, flags);
    #ifndef CONFIG_SLOB
            index = kmalloc_index(size);

            if (!index)
                return ZERO_SIZE_PTR;

            return kmem_cache_alloc_trace(
                    kmalloc_caches[kmalloc_type(flags)][index],
                    flags, size);
    #endif
        }
        return __kmalloc(size, flags);
    }

    void *__kmalloc(size_t size, gfp_t flags)
    {
        struct kmem_cache *s;
        void *ret;

        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
            return kmalloc_large(size, flags);

        s = kmalloc_slab(size, flags);          /* 1: look up the matching kmem_cache */
        if (unlikely(ZERO_OR_NULL_PTR(s)))
            return s;

        ret = slab_alloc(s, flags, _RET_IP_);   /* 2: allocate an object from it */

        trace_kmalloc(_RET_IP_, ret, size, s->size, flags);

        ret = kasan_kmalloc(s, ret, size, flags);

        return ret;
    }

    static __always_inline void *slab_alloc(struct kmem_cache *s,
            gfp_t gfpflags, unsigned long addr)
    {
        return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
    }

1. kmalloc_slab: look up the kmem_cache

    struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
    {
        unsigned int index;

        if (unlikely(size > KMALLOC_MAX_SIZE)) {
            WARN_ON_ONCE(!(flags & __GFP_NOWARN));
            return NULL;
        }

        if (size <= 192) {
            if (!size)
                return ZERO_SIZE_PTR;

            index = size_index[size_index_elem(size)];
        } else
            index = fls(size - 1);

    #ifdef CONFIG_ZONE_DMA
        if (unlikely((flags & GFP_DMA)))
            return kmalloc_dma_caches[index];
    #endif

        return kmalloc_caches[index];   /* return one of the pre-built kmalloc caches */
    }
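
To get a feel for the index = fls(size - 1) branch, here is a small userspace sketch (not kernel code) that reproduces the power-of-two rounding for sizes above 192; the 96- and 192-byte special cases handled through size_index are left out. A 200-byte request, for example, maps to index 8, i.e. kmalloc-256:

    #include <stdio.h>

    /* userspace stand-in for the kernel's fls(): 1-based position of the highest set bit */
    static int fls_demo(unsigned int x)
    {
        int pos = 0;

        while (x) {
            pos++;
            x >>= 1;
        }
        return pos;
    }

    int main(void)
    {
        unsigned int sizes[] = { 200, 300, 1000, 4097 };

        for (int i = 0; i < 4; i++) {
            int index = fls_demo(sizes[i] - 1);     /* index into kmalloc_caches */

            printf("kmalloc(%u) -> index %d -> kmalloc-%u\n",
                   sizes[i], index, 1u << index);
        }
        return 0;
    }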

2. The slab_alloc function

    static __always_inline void *slab_alloc(struct kmem_cache *s,
            gfp_t gfpflags, unsigned long addr)
    {
        return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
    }

kmem_cache_alloc 

Compared with kmalloc, kmem_cache_alloc takes a kmem_cache * pointer and always returns objects of that cache's fixed size. Because kmalloc has to round each request up to one of its predefined size classes, it frequently wastes space on internal fragmentation (a 100-byte request, for instance, is served from kmalloc-128, wasting 28 bytes per object), whereas a kmem_cache created for the object is tailored to its size and avoids that waste.

    void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
    {
        void *ret = slab_alloc(s, gfpflags, _RET_IP_);  /* same slab_alloc path as kmalloc */

        trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
                    s->size, gfpflags);

        return ret;
    }
    EXPORT_SYMBOL(kmem_cache_alloc);

    static __always_inline void *slab_alloc(struct kmem_cache *s,
            gfp_t gfpflags, unsigned long addr)
    {
        return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
    }

    static __always_inline void *slab_alloc_node(struct kmem_cache *s,
            gfp_t gfpflags, int node, unsigned long addr)
    {
        void *object;
        struct kmem_cache_cpu *c;
        struct page *page;
        unsigned long tid;

        s = slab_pre_alloc_hook(s, gfpflags);
        if (!s)
            return NULL;
    redo:
        /*
         * Must read kmem_cache cpu data via this cpu ptr. Preemption is
         * enabled. We may switch back and forth between cpus while
         * reading from one cpu area. That does not matter as long
         * as we end up on the original cpu again when doing the cmpxchg.
         *
         * We should guarantee that tid and kmem_cache are retrieved on
         * the same cpu. It could be different if CONFIG_PREEMPT so we need
         * to check if it is matched or not.
         */
        do {
            tid = this_cpu_read(s->cpu_slab->tid);
            c = raw_cpu_ptr(s->cpu_slab);
        } while (IS_ENABLED(CONFIG_PREEMPT) &&
             unlikely(tid != READ_ONCE(c->tid)));

        /*
         * Irqless object alloc/free algorithm used here depends on sequence
         * of fetching cpu_slab's data. tid should be fetched before anything
         * on c to guarantee that object and page associated with previous tid
         * won't be used with current tid. If we fetch tid first, object and
         * page could be one associated with next tid and our alloc/free
         * request will be failed. In this case, we will retry. So, no problem.
         */
        barrier();

        /*
         * The transaction ids are globally unique per cpu and per operation on
         * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
         * occurs on the right processor and that there was no operation on the
         * linked list in between.
         */
        object = c->freelist;
        page = c->page;
        if (unlikely(!object || !node_match(page, node))) {
            /* no free object on this cpu, or wrong node: take the slow path and get a new slab */
            object = __slab_alloc(s, gfpflags, node, addr, c);
            stat(s, ALLOC_SLOWPATH);
        } else {
            void *next_object = get_freepointer_safe(s, object);

            /*
             * The cmpxchg will only match if there was no additional
             * operation and if we are on the right processor.
             *
             * The cmpxchg does the following atomically (without lock
             * semantics!)
             * 1. Relocate first pointer to the current per cpu area.
             * 2. Verify that tid and freelist have not been changed
             * 3. If they were not changed replace tid and freelist
             *
             * Since this is without lock semantics the protection is only
             * against code executing on this cpu *not* from access by
             * other cpus.
             */
            /* swap cpu_slab->freelist with next_object, so that freelist points at the object to hand out next time */
            if (unlikely(!this_cpu_cmpxchg_double(
                    s->cpu_slab->freelist, s->cpu_slab->tid,
                    object, tid,
                    next_object, next_tid(tid)))) {

                note_cmpxchg_failure("slab_alloc", s, tid);
                goto redo;
            }
            prefetch_freepointer(s, next_object);
            stat(s, ALLOC_FASTPATH);
        }

        if (unlikely(gfpflags & __GFP_ZERO) && object)
            memset(object, 0, s->object_size);

        slab_post_alloc_hook(s, gfpflags, 1, &object);

        return object;
    }

The slab_alloc_node call chain (the slow path taken when a new slab has to be set up):

    slab_alloc_node
        __slab_alloc
            ___slab_alloc
                new_slab_objects
                    new_slab
                        allocate_slab
                            alloc_slab_page
                            shuffle_freelist    // walks the new page, writing the next-object address into each object
                    c = raw_cpu_ptr(s->cpu_slab)
                    c->page = page
                c->freelist = get_freepointer(s, freelist)
        get_freepointer_safe                    // fast path: read the address of the next free object

___slab_alloc (abridged)

    static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                  unsigned long addr, struct kmem_cache_cpu *c)
    {
        void *freelist;
        struct page *page;

        page = c->page;
        if (!page)
            goto new_slab;
    redo:
        .....
    load_freelist:
        /*
         * freelist is pointing to the list of objects to be used.
         * page is pointing to the page from which the objects are obtained.
         * That page must be frozen for per cpu allocations to work.
         */
        VM_BUG_ON(!c->page->frozen);
        c->freelist = get_freepointer(s, freelist);
        c->tid = next_tid(c->tid);
        return freelist;

    new_slab:
        freelist = new_slab_objects(s, gfpflags, node, &c);
        page = c->page;
        if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
            goto load_freelist;     /* jump back and set c->freelist from the new slab */
        ........
    }

The shuffle_freelist function

    static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
    {
        void *start;
        void *cur;
        void *next;
        unsigned long idx, pos, page_limit, freelist_count;

        if (page->objects < 2 || !s->random_seq)
            return false;

        freelist_count = oo_objects(s->oo);
        pos = get_random_int() % freelist_count;

        page_limit = page->objects * s->size;
        start = fixup_red_left(s, page_address(page));

        /* First entry is used as the base of the freelist */
        cur = next_freelist_entry(s, page, &pos, start, page_limit,
                    freelist_count);
        page->freelist = cur;

        for (idx = 1; idx < page->objects; idx++) {
            setup_object(s, page, cur);
            next = next_freelist_entry(s, page, &pos, start, page_limit,
                freelist_count);
            set_freepointer(s, cur, next);  /* store the next object's address inside the current one */
            cur = next;
        }
        setup_object(s, page, cur);
        set_freepointer(s, cur, NULL);

        return true;
    }

    /* write fp, the address of the next free object, at offset s->offset inside the current object */
    static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
    {
        unsigned long freeptr_addr = (unsigned long)object + s->offset;

    #ifdef CONFIG_SLAB_FREELIST_HARDENED
        BUG_ON(object == fp); /* naive detection of double free or corruption */
    #endif

        *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
    }

Object layout in a slab: every free object holds, at offset s->offset, the address of the next free object, which is how the freelist is threaded through the slab.

    static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
    {
        unsigned long freepointer_addr;
        void *p;

        if (!debug_pagealloc_enabled())
            return get_freepointer(s, object);

        freepointer_addr = (unsigned long)object + s->offset;
        /* read sizeof(p) bytes from freepointer_addr into p; this slot holds the address of the next free object */
        probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
        return freelist_ptr(s, p, freepointer_addr);
    }

    static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
                     unsigned long ptr_addr)
    {
    #ifdef CONFIG_SLAB_FREELIST_HARDENED
        return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
    #else
        return ptr;
    #endif
    }
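
To make this layout concrete, here is a small userspace simulation (plain C, not kernel code) of how SLUB threads a freelist through a slab: each free object stores the address of the next free object at a fixed offset inside itself, which is exactly what set_freepointer/get_freepointer do above (with CONFIG_SLAB_FREELIST_HARDENED the stored value is additionally XORed with a per-cache random value and the slot address, which freelist_ptr() undoes). The object size, offset and buffer below are made up for illustration:

    #include <stdio.h>
    #include <string.h>

    #define OBJ_SIZE   64       /* made-up object size */
    #define FP_OFFSET  0        /* offset of the free pointer inside a free object (s->offset in SLUB) */
    #define NR_OBJECTS 4

    static char slab[OBJ_SIZE * NR_OBJECTS];    /* stand-in for one slab page */
    static void *freelist;                      /* head of the free list */

    /* store fp, the address of the next free object, inside the current object */
    static void set_freepointer(void *object, void *fp)
    {
        memcpy((char *)object + FP_OFFSET, &fp, sizeof(fp));
    }

    /* read the address of the next free object back out of the current object */
    static void *get_freepointer(void *object)
    {
        void *fp;

        memcpy(&fp, (char *)object + FP_OFFSET, sizeof(fp));
        return fp;
    }

    static void init_freelist(void)
    {
        for (int i = 0; i < NR_OBJECTS; i++) {
            void *cur = slab + i * OBJ_SIZE;
            void *next = (i + 1 < NR_OBJECTS) ? (void *)(slab + (i + 1) * OBJ_SIZE) : NULL;

            set_freepointer(cur, next);
        }
        freelist = slab;
    }

    /* fast-path allocation: pop the head of the freelist */
    static void *alloc_object(void)
    {
        void *object = freelist;

        if (object)
            freelist = get_freepointer(object);
        return object;
    }

    int main(void)
    {
        init_freelist();
        for (int i = 0; i < NR_OBJECTS + 1; i++)
            printf("alloc -> %p\n", alloc_object());
        return 0;
    }

The fifth call returns NULL; in the kernel that is the point where slab_alloc_node falls back to the __slab_alloc slow path to acquire a new slab.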
