/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2016 by Roman Stolyarov
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include "config.h"
#include "cpu.h"
#include "nand.h"
#include "nand_id.h"
#include "system.h"
#include "panic.h"
#include "kernel.h"
#include "storage.h"
#include "string.h"
/*#define LOGF_ENABLE*/
#include "logf.h"
//#define USE_DMA
//#define USE_ECC
/*
* Standard NAND flash commands
*/
#define NAND_CMD_READ0 0
#define NAND_CMD_READ1 1
#define NAND_CMD_RNDOUT 5
#define NAND_CMD_PAGEPROG 0x10
#define NAND_CMD_READOOB 0x50
#define NAND_CMD_ERASE1 0x60
#define NAND_CMD_STATUS 0x70
#define NAND_CMD_STATUS_MULTI 0x71
#define NAND_CMD_SEQIN 0x80
#define NAND_CMD_RNDIN 0x85
#define NAND_CMD_READID 0x90
#define NAND_CMD_ERASE2 0xd0
#define NAND_CMD_RESET 0xff
/* Extended commands for large page devices */
#define NAND_CMD_READSTART 0x30
#define NAND_CMD_RNDOUTSTART 0xE0
#define NAND_CMD_CACHEDPROG 0x15
/* Status bits */
#define NAND_STATUS_FAIL 0x01
#define NAND_STATUS_FAIL_N1 0x02
#define NAND_STATUS_TRUE_READY 0x20
#define NAND_STATUS_READY 0x40
#define NAND_STATUS_WP 0x80
/*
* NAND parameter struct
*/
struct nand_param {
unsigned int bus_width; /* data bus width: 8-bit/16-bit */
unsigned int row_cycle; /* row address cycles: 2/3 */
unsigned int page_size; /* page size in bytes: 512/2048/4096 */
unsigned int oob_size; /* oob size in bytes: 16/64/128 */
unsigned int page_per_block; /* pages per block: 32/64/128 */
unsigned int bad_block_pos; /* bad block pos in oob: 0/5 */
};
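/*
 * The fields above are filled in by jz_nand_init() from the nand_info entry
 * returned by nand_identify(); bus_width is hard-coded to 16 bits and
 * bad_block_pos is not used by the read path below.  For a typical
 * 2048+64 byte, 64 pages-per-block part (illustrative values only) this
 * gives two column address cycles, three row address cycles and a flat
 * page address of block * 64 + page.
 */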
/*
* jz4760_nand.c
*
* NAND read routine for JZ4760
*
* Copyright (c) 2005-2008 Ingenic Semiconductor Inc.
*
*/
#define CFG_NAND_BASE 0xBA000000
#define NAND_ADDR_OFFSET 0x00800000
#define NAND_CMD_OFFSET 0x00400000
#define CFG_NAND_SMCR1 0x0d444400
#define NAND_DATAPORT CFG_NAND_BASE
#define NAND_ADDRPORT (CFG_NAND_BASE | NAND_ADDR_OFFSET)
#define NAND_COMMPORT (CFG_NAND_BASE | NAND_CMD_OFFSET)
#define ECC_BLOCK 512
#define ECC_POS 24
#define PAR_SIZE 13
#define __nand_cmd(n) (REG8(NAND_COMMPORT) = (n))
#define __nand_addr(n) (REG8(NAND_ADDRPORT) = (n))
#define __nand_data8() (REG8(NAND_DATAPORT))
#define __nand_data16() (REG16(NAND_DATAPORT))
#define __nand_select() (REG_NEMC_NFCSR |= NEMC_NFCSR_NFE1 | NEMC_NFCSR_NFCE1)
#define __nand_deselect() (REG_NEMC_NFCSR &= ~(NEMC_NFCSR_NFE1 | NEMC_NFCSR_NFCE1))
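/*
 * Access note (an assumption about the board wiring, not verified here):
 * the chip sits behind the static memory controller at CFG_NAND_BASE, and
 * the command/address offsets each toggle a single address line (bits 22
 * and 23), which presumably drives the CLE/ALE latch pins.  That is why
 * __nand_cmd() and __nand_addr() are plain 8-bit stores, while data is read
 * directly from NAND_DATAPORT via __nand_data8()/__nand_data16().
 */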
/*--------------------------------------------------------------*/
static struct nand_info* chip_info = NULL;
static struct nand_info* bank;
static unsigned long nand_size;
static struct nand_param internal_param;
static struct mutex nand_mtx;
#ifdef USE_DMA
static struct mutex nand_dma_mtx;
static struct semaphore nand_dma_complete;
#endif
static unsigned char temp_page[2048]; /* Scratch page buffer; assumes page_size <= 2048 */
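/*
 * Busy/ready handshake: the NAND R/B# signal appears to be wired to bit 20
 * of GPIO port 0 (mask 0x00100000).  jz_nand_wait_ready() first spins,
 * bounded by 'timeout', while the pin still reads ready (in case the busy
 * pulse has not started yet), then waits for the pin to go high again,
 * i.e. for the operation to complete.
 */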
static inline void jz_nand_wait_ready(void)
{
unsigned int timeout = 1000;
while ((REG_GPIO_PXPIN(0) & 0x00100000) && timeout--);
while (!(REG_GPIO_PXPIN(0) & 0x00100000));
}
#ifndef USE_DMA
static inline void jz_nand_read_buf16(void *buf, int count)
{
register int i;
register unsigned short *p = (unsigned short *)buf;
for (i = 0; i < count; i += 2)
*p++ = __nand_data16();
}
static inline void jz_nand_read_buf8(void *buf, int count)
{
register int i;
register unsigned char *p = (unsigned char *)buf;
for (i = 0; i < count; i++)
*p++ = __nand_data8();
}
#else
static void jz_nand_write_dma(void *source, unsigned int len, int bw)
{
mutex_lock(&nand_dma_mtx);
if(((unsigned int)source < 0xa0000000) && len)
commit_discard_dcache_range(source, len);
dma_enable();
REG_DMAC_DCCSR(DMA_NAND_CHANNEL) = DMAC_DCCSR_NDES;
REG_DMAC_DSAR(DMA_NAND_CHANNEL) = PHYSADDR((unsigned long)source);
REG_DMAC_DTAR(DMA_NAND_CHANNEL) = PHYSADDR((unsigned long)NAND_DATAPORT);
REG_DMAC_DTCR(DMA_NAND_CHANNEL) = len / 16;
REG_DMAC_DRSR(DMA_NAND_CHANNEL) = DMAC_DRSR_RS_AUTO;
REG_DMAC_DCMD(DMA_NAND_CHANNEL) = (DMAC_DCMD_SAI| DMAC_DCMD_DAI | DMAC_DCMD_SWDH_32 | DMAC_DCMD_DS_16BYTE |
(bw == 8 ? DMAC_DCMD_DWDH_8 : DMAC_DCMD_DWDH_16));
REG_DMAC_DCCSR(DMA_NAND_CHANNEL) |= DMAC_DCCSR_EN; /* Enable DMA channel */
#if 1
while( REG_DMAC_DTCR(DMA_NAND_CHANNEL) )
yield();
#else
REG_DMAC_DCMD(DMA_NAND_CHANNEL) |= DMAC_DCMD_TIE; /* Enable DMA interrupt */
semaphore_wait(&nand_dma_complete, TIMEOUT_BLOCK);
#endif
REG_DMAC_DCCSR(DMA_NAND_CHANNEL) &= ~DMAC_DCCSR_EN; /* Disable DMA channel */
dma_disable();
mutex_unlock(&nand_dma_mtx);
}
static void jz_nand_read_dma(void *target, unsigned int len, int bw)
{
mutex_lock(&nand_dma_mtx);
if(((unsigned int)target < 0xa0000000) && len)
discard_dcache_range(target, len);
dma_enable();
REG_DMAC_DCCSR(DMA_NAND_CHANNEL) = DMAC_DCCSR_NDES ;
REG_DMAC_DSAR(DMA_NAND_CHANNEL) = PHYSADDR((unsigned long)NAND_DATAPORT);
REG_DMAC_DTAR(DMA_NAND_CHANNEL) = PHYSADDR((unsigned long)target);
REG_DMAC_DTCR(DMA_NAND_CHANNEL) = len / 4;
REG_DMAC_DRSR(DMA_NAND_CHANNEL) = DMAC_DRSR_RS_AUTO;
REG_DMAC_DCMD(DMA_NAND_CHANNEL) = (DMAC_DCMD_SAI| DMAC_DCMD_DAI | DMAC_DCMD_DWDH_32 | DMAC_DCMD_DS_32BIT |
(bw == 8 ? DMAC_DCMD_SWDH_8 : DMAC_DCMD_SWDH_16));
REG_DMAC_DCCSR(DMA_NAND_CHANNEL) |= DMAC_DCCSR_EN; /* Enable DMA channel */
#if 1
while( REG_DMAC_DTCR(DMA_NAND_CHANNEL) )
yield();
#else
REG_DMAC_DCMD(DMA_NAND_CHANNEL) |= DMAC_DCMD_TIE; /* Enable DMA interrupt */
semaphore_wait(&nand_dma_complete, TIMEOUT_BLOCK);
#endif
//REG_DMAC_DCCSR(DMA_NAND_CHANNEL) &= ~DMAC_DCCSR_EN; /* Disable DMA channel */
dma_disable();
mutex_unlock(&nand_dma_mtx);
}
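/*
 * Both DMA helpers run the channel in no-descriptor mode (DMAC_DCCSR_NDES)
 * with an auto-request source and poll the remaining transfer count to
 * detect completion (the '#if 1' branch).  The alternative branch instead
 * sets the transfer-interrupt bit and blocks on nand_dma_complete, which is
 * released by the DMA_CALLBACK() handler below.
 */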
void DMA_CALLBACK(DMA_NAND_CHANNEL)(void)
{
if (REG_DMAC_DCCSR(DMA_NAND_CHANNEL) & DMAC_DCCSR_HLT)
REG_DMAC_DCCSR(DMA_NAND_CHANNEL) &= ~DMAC_DCCSR_HLT;
if (REG_DMAC_DCCSR(DMA_NAND_CHANNEL) & DMAC_DCCSR_AR)
REG_DMAC_DCCSR(DMA_NAND_CHANNEL) &= ~DMAC_DCCSR_AR;
if (REG_DMAC_DCCSR(DMA_NAND_CHANNEL) & DMAC_DCCSR_CT)
REG_DMAC_DCCSR(DMA_NAND_CHANNEL) &= ~DMAC_DCCSR_CT;
if (REG_DMAC_DCCSR(DMA_NAND_CHANNEL) & DMAC_DCCSR_TT)
REG_DMAC_DCCSR(DMA_NAND_CHANNEL) &= ~DMAC_DCCSR_TT;
semaphore_release(&nand_dma_complete);
}
#endif /* USE_DMA */
static inline void jz_nand_read_buf(void *buf, int count, int bw)
{
#ifdef USE_DMA
if (bw == 8)
jz_nand_read_dma(buf, count, 8);
else
jz_nand_read_dma(buf, count, 16);
#else
if (bw == 8)
jz_nand_read_buf8(buf, count);
else
jz_nand_read_buf16(buf, count);
#endif
}
#ifdef USE_ECC
/*
 * Correct a single (1 to 9 bit) error in a 512-byte data block by XORing
 * the 9-bit symbol at symbol index 'idx' with 'mask'
 */
static void jz_rs_correct(unsigned char *dat, int idx, int mask)
{
int i, j;
unsigned short d, d1, dm;
i = (idx * 9) >> 3;
j = (idx * 9) & 0x7;
i = (j == 0) ? (i - 1) : i;
j = (j == 0) ? 7 : (j - 1);
if (i > 512)
return;
if (i == 512)
d = dat[i - 1];
else
d = (dat[i] << 8) | dat[i - 1];
d1 = (d >> j) & 0x1ff;
d1 ^= mask;
dm = ~(0x1ff << j);
d = (d & dm) | (d1 << j);
dat[i - 1] = d & 0xff;
if (i < 512)
dat[i] = (d >> 8) & 0xff;
}
#endif
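/*
 * Worked example of the fix-up above (the arithmetic treats 'idx' as
 * 1-based): symbol number idx occupies bits (idx-1)*9 .. idx*9-1 of the
 * 512-byte block.  For idx = 3 the 9-bit field starts 2 bits into byte 2,
 * so the function loads bytes 2 and 3, XORs the field with 'mask' and
 * writes both bytes back; indexes past the end of the block are ignored.
 */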
/*
 * Read 'size' bytes of out-of-band (spare area) data from the given page
 */
static int jz_nand_read_oob(unsigned long page_addr, unsigned char *buf, int size)
{
struct nand_param *nandp = &internal_param;
int page_size, row_cycle, bus_width;
int col_addr;
page_size = nandp->page_size;
row_cycle = nandp->row_cycle;
bus_width = nandp->bus_width;
if (page_size >= 2048)
col_addr = page_size;
else
col_addr = 0;
if (page_size >= 2048)
/* Send READ0 command */
__nand_cmd(NAND_CMD_READ0);
else
/* Send READOOB command */
__nand_cmd(NAND_CMD_READOOB);
/* Send column address */
__nand_addr(col_addr & 0xff);
if (page_size >= 2048)
__nand_addr((col_addr >> 8) & 0xff);
/* Send page address */
__nand_addr(page_addr & 0xff);
__nand_addr((page_addr >> 8) & 0xff);
if (row_cycle == 3)
__nand_addr((page_addr >> 16) & 0xff);
/* Send READSTART command for NAND with page size >= 2048 */
if (page_size >= 2048)
__nand_cmd(NAND_CMD_READSTART);
/* Wait for device ready */
jz_nand_wait_ready();
/* Read oob data */
jz_nand_read_buf(buf, size, bus_width);
return 0;
}
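/*
 * Addressing note for the routine above: on devices with a page size of
 * 2048 bytes or more the spare area is mapped at column address ==
 * page_size and is fetched with a normal READ0/READSTART sequence, while
 * smaller (512 byte page) devices use the dedicated READOOB command at
 * column 0.
 */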
/*
 * jz_nand_read_page()
 *
 * Input:
 *
 * page_addr - flat page number within the chip: 0, 1, 2, ...
 * dst       - pointer to the target buffer (at least page_size bytes)
 */
static int jz_nand_read_page(unsigned long page_addr, unsigned char *dst)
{
struct nand_param *nandp = &internal_param;
int page_size, oob_size;
int row_cycle, bus_width, ecc_count;
int i;
#ifdef USE_ECC
int j;
#endif
unsigned char *data_buf;
unsigned char oob_buf[nandp->oob_size];
page_size = nandp->page_size;
oob_size = nandp->oob_size;
row_cycle = nandp->row_cycle;
bus_width = nandp->bus_width;
/*
* Read oob data
*/
jz_nand_read_oob(page_addr, oob_buf, oob_size);
/*
* Read page data
*/
/* Send READ0 command */
__nand_cmd(NAND_CMD_READ0);
/* Send column address */
__nand_addr(0);
if (page_size >= 2048)
__nand_addr(0);
/* Send page address */
__nand_addr(page_addr & 0xff);
__nand_addr((page_addr >> 8) & 0xff);
if (row_cycle >= 3)
__nand_addr((page_addr >> 16) & 0xff);
/* Send READSTART command for NAND with page size >= 2048 */
if (page_size >= 2048)
__nand_cmd(NAND_CMD_READSTART);
/* Wait for device ready */
jz_nand_wait_ready();
/* Read page data */
data_buf = dst;
ecc_count = page_size / ECC_BLOCK;
for (i = 0; i < ecc_count; i++)
{
#ifdef USE_ECC
volatile unsigned char *paraddr = (volatile unsigned char *)EMC_NFPAR0;
unsigned int stat;
/* Enable RS decoding */
REG_EMC_NFINTS = 0x0;
__ecc_decoding_4bit();
#endif
/* Read data */
jz_nand_read_buf((void *)data_buf, ECC_BLOCK, bus_width);
#ifdef USE_ECC
/* Set PAR values */
for (j = 0; j < PAR_SIZE; j++)
*paraddr++ = oob_buf[ECC_POS + i*PAR_SIZE + j];
/* Set PRDY */
REG_EMC_NFECR |= EMC_NFECR_PRDY;
/* Wait for completion */
__ecc_decode_sync();
/* Disable decoding */
__ecc_disable();
/* Check result of decoding */
stat = REG_EMC_NFINTS;
if (stat & EMC_NFINTS_ERR)
{
/* Error occurred */
if (stat & EMC_NFINTS_UNCOR)
{
/* Uncorrectable error occurred */
logf("Uncorrectable ECC error at NAND page address 0x%lx", page_addr);
return -1;
}
else
{
unsigned int errcnt, index, mask;
errcnt = (stat & EMC_NFINTS_ERRCNT_MASK) >> EMC_NFINTS_ERRCNT_BIT;
                /* Cases fall through intentionally so every reported error is corrected */
                switch (errcnt)
{
case 4:
index = (REG_EMC_NFERR3 & EMC_NFERR_INDEX_MASK) >> EMC_NFERR_INDEX_BIT;
mask = (REG_EMC_NFERR3 & EMC_NFERR_MASK_MASK) >> EMC_NFERR_MASK_BIT;
jz_rs_correct(data_buf, index, mask);
case 3:
index = (REG_EMC_NFERR2 & EMC_NFERR_INDEX_MASK) >> EMC_NFERR_INDEX_BIT;
mask = (REG_EMC_NFERR2 & EMC_NFERR_MASK_MASK) >> EMC_NFERR_MASK_BIT;
jz_rs_correct(data_buf, index, mask);
case 2:
index = (REG_EMC_NFERR1 & EMC_NFERR_INDEX_MASK) >> EMC_NFERR_INDEX_BIT;
mask = (REG_EMC_NFERR1 & EMC_NFERR_MASK_MASK) >> EMC_NFERR_MASK_BIT;
jz_rs_correct(data_buf, index, mask);
case 1:
index = (REG_EMC_NFERR0 & EMC_NFERR_INDEX_MASK) >> EMC_NFERR_INDEX_BIT;
mask = (REG_EMC_NFERR0 & EMC_NFERR_MASK_MASK) >> EMC_NFERR_MASK_BIT;
jz_rs_correct(data_buf, index, mask);
break;
default:
break;
}
}
}
#endif
data_buf += ECC_BLOCK;
}
return 0;
}
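/*
 * Illustrative use (hypothetical caller, not part of this driver): code
 * that thinks in (block, page) pairs would form the flat page address as
 *
 *     unsigned long page_addr = block * internal_param.page_per_block + page;
 *     jz_nand_read_page(page_addr, buffer);
 *
 * With USE_ECC defined, the PAR_SIZE (13) parity bytes for 512-byte chunk i
 * of a page are taken from oob_buf[ECC_POS + i*PAR_SIZE] onwards.
 */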
static int jz_nand_init(void)
{
unsigned char cData[5];
__gpio_as_nand_16bit(1);
REG_NEMC_SMCR1 = CFG_NAND_SMCR1 | 0x40;
__nand_select();
__nand_cmd(NAND_CMD_READID);
    __nand_addr(NAND_CMD_READ0); /* i.e. a single 0x00 address cycle, as READID expects */
cData[0] = __nand_data8();
cData[1] = __nand_data8();
cData[2] = __nand_data8();
cData[3] = __nand_data8();
cData[4] = __nand_data8();
__nand_deselect();
    logf("NAND chip: 0x%x 0x%x 0x%x 0x%x 0x%x", cData[0], cData[1],
         cData[2], cData[3], cData[4]);
bank = nand_identify(cData);
if(bank == NULL)
{
panicf("Unknown NAND flash chip: 0x%x 0x%x 0x%x 0x%x 0x%x", cData[0],
cData[1], cData[2], cData[3], cData[4]);
return -1; /* panicf() doesn't return though */
}
chip_info = bank;
internal_param.bus_width = 16;
internal_param.row_cycle = chip_info->row_cycles;
internal_param.page_size = chip_info->page_size;
internal_param.oob_size = chip_info->spare_size;
internal_param.page_per_block = chip_info->pages_per_block;
internal_param.bad_block_pos = 0;
nand_size = ((chip_info->page_size * chip_info->blocks_per_bank * chip_info->pages_per_block) - 0x200000) / 512;
return 0;
}
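/*
 * nand_size is the number of 512-byte sectors exposed to the storage layer:
 * the raw chip capacity (page_size * pages_per_block * blocks_per_bank)
 * minus the 0x200000 bytes reserved at the start of flash for the
 * bootloader (skipped again in nand_read_sectors()), divided by 512.
 */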
int nand_init(void)
{
int res = 0;
static bool inited = false;
if(!inited)
{
res = jz_nand_init();
mutex_init(&nand_mtx);
#ifdef USE_DMA
mutex_init(&nand_dma_mtx);
semaphore_init(&nand_dma_complete, 1, 0);
system_enable_irq(DMA_IRQ(DMA_NAND_CHANNEL));
#endif
inited = true;
}
return res;
}
static inline int read_sector(unsigned long start, unsigned int count,
void* buf, unsigned int chip_size)
{
register int ret;
if(UNLIKELY(start % chip_size == 0 && count == chip_size))
ret = jz_nand_read_page(start / chip_size, buf);
else
{
ret = jz_nand_read_page(start / chip_size, temp_page);
memcpy(buf, temp_page + (start % chip_size), count);
}
return ret;
}
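/*
 * read_sector() maps a byte range onto NAND pages: a request that is
 * page-aligned and exactly one page long is read straight into the caller's
 * buffer, anything else is read into temp_page and copied out.  Either way
 * the requested range is assumed not to cross a page boundary.
 */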
static inline int write_sector(unsigned long start, unsigned int count,
const void* buf, unsigned int chip_size)
{
int ret = 0;
(void)start;
(void)count;
(void)buf;
(void)chip_size;
/* TODO */
return ret;
}
int nand_read_sectors(IF_MD(int drive,) unsigned long start, int count, void* buf)
{
#ifdef HAVE_MULTIDRIVE
(void)drive;
#endif
int ret = 0;
unsigned int _count, chip_size = chip_info->page_size;
unsigned long _start;
logf("start");
mutex_lock(&nand_mtx);
_start = start << 9;
_start += 0x200000; /* skip BL */
_count = count << 9;
__nand_select();
ret = read_sector(_start, _count, buf, chip_size);
__nand_deselect();
mutex_unlock(&nand_mtx);
logf("nand_read_sectors(%ld, %d, 0x%x): %d", start, count, (int)buf, ret);
return ret;
}
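/*
 * Translation used above: 'start' and 'count' are in 512-byte sectors, so
 * both are shifted left by 9 to get byte values, and 0x200000 bytes are
 * added to step over the bootloader region (the same 2 MiB subtracted from
 * nand_size in jz_nand_init()).
 */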
int nand_write_sectors(IF_MD(int drive,) unsigned long start, int count, const void* buf)
{
#ifdef HAVE_MULTIDRIVE
(void)drive;
#endif
int ret = 0;
unsigned int _count, chip_size = chip_info->page_size;
unsigned long _start;
logf("start");
mutex_lock(&nand_mtx);
_start = start << 9;
_start += chip_info->page_size * chip_info->pages_per_block; /* skip BL */
_count = count << 9;
__nand_select();
ret = write_sector(_start, _count, buf, chip_size);
__nand_deselect();
mutex_unlock(&nand_mtx);
logf("nand_write_sectors(%ld, %d, 0x%x): %d", start, count, (int)buf, ret);
return ret;
}
#ifdef HAVE_STORAGE_FLUSH
int nand_flush(void)
{
return 0;
}
#endif
void nand_spindown(int seconds)
{
/* null */
(void)seconds;
}
void nand_sleep(void)
{
/* null */
}
void nand_spin(void)
{
/* null */
}
void nand_enable(bool on)
{
/* null - flash controller is enabled/disabled as needed. */
(void)on;
}
/* TODO */
long nand_last_disk_activity(void)
{
return 0;
}
int nand_spinup_time(void)
{
return 0;
}
void nand_sleepnow(void)
{
}
#ifdef STORAGE_GET_INFO
void nand_get_info(IF_MD(int drive,) struct storage_info *info)
{
#ifdef HAVE_MULTIDRIVE
(void)drive;
#endif
/* firmware version */
info->revision="0.00";
info->vendor="Rockbox";
info->product="NAND Storage";
/* sector count and size */
info->num_sectors = nand_size;
info->sector_size = 512;
}
#endif
#ifdef CONFIG_STORAGE_MULTI
int nand_num_drives(int first_drive)
{
/* We don't care which logical drive number(s) we have been assigned */
(void)first_drive;
return 1;
}
#endif