Move arch/ppc to arch/powerpc

As discussed on the list, move "arch/ppc" to "arch/powerpc" to
better match the Linux directory structure.

Please note that this patch also changes the "ppc" target in
MAKEALL to "powerpc" to match this new infrastructure. But "ppc"
is kept as an alias for now, to not break compatibility with
scripts using this name.

Signed-off-by: Stefan Roese <sr@denx.de>
Acked-by: Wolfgang Denk <wd@denx.de>
Acked-by: Detlev Zundel <dzu@denx.de>
Acked-by: Kim Phillips <kim.phillips@freescale.com>
Cc: Peter Tyser <ptyser@xes-inc.com>
Cc: Anatolij Gustschin <agust@denx.de>
Author: Stefan Roese
Date: 2010-04-15 16:07:28 +02:00
Committed-by: Wolfgang Denk
Parent: 254ab7bd46
Commit: a47a12becf
579 changed files with 879 additions and 877 deletions


@@ -0,0 +1,27 @@
#
# Copyright 2009 Freescale Semiconductor, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# Version 2 as published by the Free Software Foundation.
#
include $(TOPDIR)/config.mk
LIB = $(obj)lib8xxx.a
COBJS-y += cpu.o
COBJS-$(CONFIG_OF_LIBFDT) += fdt.o
COBJS-$(CONFIG_PCI) += pci_cfg.o
SRCS := $(START:.o=.S) $(SOBJS-y:.o=.S) $(COBJS-y:.o=.c)
OBJS := $(addprefix $(obj),$(SOBJS-y) $(COBJS-y))
all: $(obj).depend $(LIB)
$(LIB): $(OBJS)
$(AR) $(ARFLAGS) $@ $(OBJS)
include $(SRCTREE)/rules.mk
sinclude $(obj).depend
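The COBJS-$(CONFIG_...) lines above mean that fdt.o and pci_cfg.o are built into lib8xxx.a only when the corresponding options are enabled by the board configuration. As a minimal sketch (the header name and option set here are illustrative, not taken from this commit), a board config header would enable them like this:

/* include/configs/<some_board>.h -- hypothetical example */
#define CONFIG_OF_LIBFDT	/* pulls fdt.o into the library */
#define CONFIG_PCI		/* pulls pci_cfg.o into the library */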


@@ -0,0 +1,144 @@
/*
* Copyright 2009-2010 Freescale Semiconductor, Inc.
*
* This file is derived from arch/powerpc/cpu/mpc85xx/cpu.c and
* arch/powerpc/cpu/mpc86xx/cpu.c. Basically this file contains
* cpu specific common code for 85xx/86xx processors.
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <config.h>
#include <common.h>
#include <command.h>
#include <tsec.h>
#include <netdev.h>
#include <asm/cache.h>
#include <asm/io.h>
DECLARE_GLOBAL_DATA_PTR;
struct cpu_type cpu_type_list [] = {
#if defined(CONFIG_MPC85xx)
CPU_TYPE_ENTRY(8533, 8533, 1),
CPU_TYPE_ENTRY(8533, 8533_E, 1),
CPU_TYPE_ENTRY(8535, 8535, 1),
CPU_TYPE_ENTRY(8535, 8535_E, 1),
CPU_TYPE_ENTRY(8536, 8536, 1),
CPU_TYPE_ENTRY(8536, 8536_E, 1),
CPU_TYPE_ENTRY(8540, 8540, 1),
CPU_TYPE_ENTRY(8541, 8541, 1),
CPU_TYPE_ENTRY(8541, 8541_E, 1),
CPU_TYPE_ENTRY(8543, 8543, 1),
CPU_TYPE_ENTRY(8543, 8543_E, 1),
CPU_TYPE_ENTRY(8544, 8544, 1),
CPU_TYPE_ENTRY(8544, 8544_E, 1),
CPU_TYPE_ENTRY(8545, 8545, 1),
CPU_TYPE_ENTRY(8545, 8545_E, 1),
CPU_TYPE_ENTRY(8547, 8547_E, 1),
CPU_TYPE_ENTRY(8548, 8548, 1),
CPU_TYPE_ENTRY(8548, 8548_E, 1),
CPU_TYPE_ENTRY(8555, 8555, 1),
CPU_TYPE_ENTRY(8555, 8555_E, 1),
CPU_TYPE_ENTRY(8560, 8560, 1),
CPU_TYPE_ENTRY(8567, 8567, 1),
CPU_TYPE_ENTRY(8567, 8567_E, 1),
CPU_TYPE_ENTRY(8568, 8568, 1),
CPU_TYPE_ENTRY(8568, 8568_E, 1),
CPU_TYPE_ENTRY(8569, 8569, 1),
CPU_TYPE_ENTRY(8569, 8569_E, 1),
CPU_TYPE_ENTRY(8572, 8572, 2),
CPU_TYPE_ENTRY(8572, 8572_E, 2),
CPU_TYPE_ENTRY(P1011, P1011, 1),
CPU_TYPE_ENTRY(P1011, P1011_E, 1),
CPU_TYPE_ENTRY(P1012, P1012, 1),
CPU_TYPE_ENTRY(P1012, P1012_E, 1),
CPU_TYPE_ENTRY(P1013, P1013, 1),
CPU_TYPE_ENTRY(P1013, P1013_E, 1),
CPU_TYPE_ENTRY(P1020, P1020, 2),
CPU_TYPE_ENTRY(P1020, P1020_E, 2),
CPU_TYPE_ENTRY(P1021, P1021, 2),
CPU_TYPE_ENTRY(P1021, P1021_E, 2),
CPU_TYPE_ENTRY(P1022, P1022, 2),
CPU_TYPE_ENTRY(P1022, P1022_E, 2),
CPU_TYPE_ENTRY(P2010, P2010, 1),
CPU_TYPE_ENTRY(P2010, P2010_E, 1),
CPU_TYPE_ENTRY(P2020, P2020, 2),
CPU_TYPE_ENTRY(P2020, P2020_E, 2),
CPU_TYPE_ENTRY(P4040, P4040, 4),
CPU_TYPE_ENTRY(P4040, P4040_E, 4),
CPU_TYPE_ENTRY(P4080, P4080, 8),
CPU_TYPE_ENTRY(P4080, P4080_E, 8),
#elif defined(CONFIG_MPC86xx)
CPU_TYPE_ENTRY(8610, 8610, 1),
CPU_TYPE_ENTRY(8641, 8641, 2),
CPU_TYPE_ENTRY(8641D, 8641D, 2),
#endif
};
struct cpu_type cpu_type_unknown = CPU_TYPE_ENTRY(Unknown, Unknown, 1);
struct cpu_type *identify_cpu(u32 ver)
{
int i;
for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++) {
if (cpu_type_list[i].soc_ver == ver)
return &cpu_type_list[i];
}
return &cpu_type_unknown;
}
int cpu_numcores(void)
{
struct cpu_type *cpu;
cpu = gd->cpu;
return cpu->num_cores;
}
int probecpu (void)
{
uint svr;
uint ver;
svr = get_svr();
ver = SVR_SOC_VER(svr);
gd->cpu = identify_cpu(ver);
return 0;
}
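/*
* Illustration (not part of this file): once probecpu() has run, board
* or arch code can read the matched entry back via the global data
* pointer, for example:
*
*	struct cpu_type *cpu = gd->cpu;
*	printf("CPU: MPC%s with %u core(s)\n", cpu->name, cpu->num_cores);
*
* cpu->name and cpu->num_cores come from cpu_type_list[] above; the
* "MPC" prefix is only example output formatting.
*/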
/*
* Initializes on-chip ethernet controllers.
* to override, implement board_eth_init()
*/
int cpu_eth_init(bd_t *bis)
{
#if defined(CONFIG_ETHER_ON_FCC)
fec_initialize(bis);
#endif
#if defined(CONFIG_UEC_ETH)
uec_standard_init(bis);
#endif
#if defined(CONFIG_TSEC_ENET) || defined(CONFIG_MPC85XX_FEC)
tsec_standard_init(bis);
#endif
return 0;
}
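/*
* Hypothetical board-side override (not part of this commit): a board
* that needs a different controller setup can provide its own
* board_eth_init(), doing any board-specific registration first and
* optionally falling back to the common code:
*
*	int board_eth_init(bd_t *bis)
*	{
*		return cpu_eth_init(bis);
*	}
*/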


@@ -0,0 +1,35 @@
#
# Copyright 2008 Freescale Semiconductor, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# Version 2 as published by the Free Software Foundation.
#
include $(TOPDIR)/config.mk
LIB = $(obj)libddr.a
COBJS-$(CONFIG_FSL_DDR1) += main.o util.o ctrl_regs.o options.o \
lc_common_dimm_params.o
COBJS-$(CONFIG_FSL_DDR1) += ddr1_dimm_params.o
COBJS-$(CONFIG_FSL_DDR2) += main.o util.o ctrl_regs.o options.o \
lc_common_dimm_params.o
COBJS-$(CONFIG_FSL_DDR2) += ddr2_dimm_params.o
COBJS-$(CONFIG_FSL_DDR3) += main.o util.o ctrl_regs.o options.o \
lc_common_dimm_params.o
COBJS-$(CONFIG_FSL_DDR3) += ddr3_dimm_params.o
SRCS := $(START:.o=.S) $(SOBJS-y:.o=.S) $(COBJS-y:.o=.c)
OBJS := $(addprefix $(obj),$(SOBJS-y) $(COBJS-y))
all: $(obj).depend $(LIB)
$(LIB): $(OBJS)
$(AR) $(ARFLAGS) $@ $(OBJS)
include $(SRCTREE)/rules.mk
sinclude $(obj).depend


@@ -0,0 +1,53 @@
/*
* Copyright 2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* Version 2 as published by the Free Software Foundation.
*/
#ifndef COMMON_TIMING_PARAMS_H
#define COMMON_TIMING_PARAMS_H
typedef struct {
/* parameters to constrict */
unsigned int tCKmin_X_ps;
unsigned int tCKmax_ps;
unsigned int tCKmax_max_ps;
unsigned int tRCD_ps;
unsigned int tRP_ps;
unsigned int tRAS_ps;
unsigned int tWR_ps; /* maximum = 63750 ps */
unsigned int tWTR_ps; /* maximum = 63750 ps */
unsigned int tRFC_ps; /* maximum = 255 ns + 256 ns + .75 ns
= 511750 ps */
unsigned int tRRD_ps; /* maximum = 63750 ps */
unsigned int tRC_ps; /* maximum = 254 ns + .75 ns = 254750 ps */
unsigned int refresh_rate_ps;
unsigned int tIS_ps; /* byte 32, spd->ca_setup */
unsigned int tIH_ps; /* byte 33, spd->ca_hold */
unsigned int tDS_ps; /* byte 34, spd->data_setup */
unsigned int tDH_ps; /* byte 35, spd->data_hold */
unsigned int tRTP_ps; /* byte 38, spd->trtp */
unsigned int tDQSQ_max_ps; /* byte 44, spd->tdqsq */
unsigned int tQHS_ps; /* byte 45, spd->tqhs */
unsigned int ndimms_present;
unsigned int lowest_common_SPD_caslat;
unsigned int highest_common_derated_caslat;
unsigned int additive_latency;
unsigned int all_DIMMs_burst_lengths_bitmask;
unsigned int all_DIMMs_registered;
unsigned int all_DIMMs_unbuffered;
unsigned int all_DIMMs_ECC_capable;
unsigned long long total_mem;
unsigned long long base_address;
} common_timing_params_t;
#endif

File diff suppressed because it is too large


@@ -0,0 +1,81 @@
/*
* Copyright 2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* Version 2 as published by the Free Software Foundation.
*/
#ifndef FSL_DDR_MAIN_H
#define FSL_DDR_MAIN_H
#include <asm/fsl_ddr_sdram.h>
#include <asm/fsl_ddr_dimm_params.h>
#include "common_timing_params.h"
/*
* Bind the main DDR setup driver's generic names
* to this specific DDR technology.
*/
static __inline__ int
compute_dimm_parameters(const generic_spd_eeprom_t *spd,
dimm_params_t *pdimm,
unsigned int dimm_number)
{
return ddr_compute_dimm_parameters(spd, pdimm, dimm_number);
}
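/*
* Note (assumption based on the surrounding headers): generic_spd_eeprom_t
* is expected to resolve, via asm/fsl_ddr_sdram.h, to the DDR1/DDR2/DDR3
* SPD layout selected by CONFIG_FSL_DDR1/2/3, so this wrapper simply
* forwards to the technology-specific ddr_compute_dimm_parameters()
* compiled into libddr.a.
*/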
/*
* Data Structures
*
* All data structures have to be on the stack
*/
#define CONFIG_SYS_NUM_DDR_CTLRS CONFIG_NUM_DDR_CONTROLLERS
#define CONFIG_SYS_DIMM_SLOTS_PER_CTLR CONFIG_DIMM_SLOTS_PER_CTLR
typedef struct {
generic_spd_eeprom_t
spd_installed_dimms[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_SYS_DIMM_SLOTS_PER_CTLR];
struct dimm_params_s
dimm_params[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_SYS_DIMM_SLOTS_PER_CTLR];
memctl_options_t memctl_opts[CONFIG_SYS_NUM_DDR_CTLRS];
common_timing_params_t common_timing_params[CONFIG_SYS_NUM_DDR_CTLRS];
fsl_ddr_cfg_regs_t fsl_ddr_config_reg[CONFIG_SYS_NUM_DDR_CTLRS];
} fsl_ddr_info_t;
/* Compute steps */
#define STEP_GET_SPD (1 << 0)
#define STEP_COMPUTE_DIMM_PARMS (1 << 1)
#define STEP_COMPUTE_COMMON_PARMS (1 << 2)
#define STEP_GATHER_OPTS (1 << 3)
#define STEP_ASSIGN_ADDRESSES (1 << 4)
#define STEP_COMPUTE_REGS (1 << 5)
#define STEP_PROGRAM_REGS (1 << 6)
#define STEP_ALL 0xFFF
extern unsigned long long
fsl_ddr_compute(fsl_ddr_info_t *pinfo, unsigned int start_step);
extern const char * step_to_string(unsigned int step);
extern unsigned int
compute_fsl_memctl_config_regs(const memctl_options_t *popts,
fsl_ddr_cfg_regs_t *ddr,
const common_timing_params_t *common_dimm,
const dimm_params_t *dimm_parameters,
unsigned int dbw_capacity_adjust);
extern unsigned int
compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
common_timing_params_t *outpdimm,
unsigned int number_of_dimms);
extern unsigned int populate_memctl_options(int all_DIMMs_registered,
memctl_options_t *popts,
dimm_params_t *pdimm,
unsigned int ctrl_num);
extern unsigned int mclk_to_picos(unsigned int mclk);
extern unsigned int get_memory_clk_period_ps(void);
extern unsigned int picos_to_mclk(unsigned int picos);
#endif


@@ -0,0 +1,343 @@
/*
* Copyright 2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* Version 2 as published by the Free Software Foundation.
*/
#include <common.h>
#include <asm/fsl_ddr_sdram.h>
#include "ddr.h"
/*
* Calculate the Density of each Physical Rank.
* Returned size is in bytes.
*
* Study this table from Byte 31 of the JEDEC SPD Spec.
*
* DDR I DDR II
* Bit Size Size
* --- ----- ------
* 7 high 512MB 512MB
* 6 256MB 256MB
* 5 128MB 128MB
* 4 64MB 16GB
* 3 32MB 8GB
* 2 16MB 4GB
* 1 2GB 2GB
* 0 low 1GB 1GB
*
* Reorder Table to be linear by stripping the bottom
* 2 or 5 bits off and shifting them up to the top.
*/
static unsigned long long
compute_ranksize(unsigned int mem_type, unsigned char row_dens)
{
unsigned long long bsize;
/* Bottom 2 bits up to the top. */
bsize = ((row_dens >> 2) | ((row_dens & 3) << 6));
bsize <<= 24ULL;
debug("DDR: DDR I rank density = 0x%08x\n", bsize);
return bsize;
}
/*
* Convert a two-nibble BCD value into a cycle time.
* While the spec calls for nano-seconds, picos are returned.
*
* This implements the tables for bytes 9, 23 and 25 for both
* DDR I and II. No allowance for distinguishing the invalid
* fields absent for DDR I yet present in DDR II is made.
* (That is, cycle times of .25, .33, .66 and .75 ns are
* allowed for both DDR II and I.)
*/
static unsigned int
convert_bcd_tenths_to_cycle_time_ps(unsigned int spd_val)
{
/* Table look up the lower nibble, allow DDR I & II. */
unsigned int tenths_ps[16] = {
0,
100,
200,
300,
400,
500,
600,
700,
800,
900,
250, /* This and the next 3 entries valid ... */
330, /* ... only for tCK calculations. */
660,
750,
0, /* undefined */
0 /* undefined */
};
unsigned int whole_ns = (spd_val & 0xF0) >> 4;
unsigned int tenth_ns = spd_val & 0x0F;
unsigned int ps = whole_ns * 1000 + tenths_ps[tenth_ns];
return ps;
}
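/*
* Worked example with illustrative SPD values: 0x75 decodes as 7 whole
* ns plus tenths code 5, i.e. 7000 + 500 = 7500 ps (7.5 ns); 0x3A
* decodes as 3000 + 250 = 3250 ps, using one of the fractional codes
* that are only meaningful for tCK bytes.
*/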
static unsigned int
convert_bcd_hundredths_to_cycle_time_ps(unsigned int spd_val)
{
unsigned int tenth_ns = (spd_val & 0xF0) >> 4;
unsigned int hundredth_ns = spd_val & 0x0F;
unsigned int ps = tenth_ns * 100 + hundredth_ns * 10;
return ps;
}
static unsigned int byte40_table_ps[8] = {
0,
250,
330,
500,
660,
750,
0, /* supposed to be RFC, but not sure what that means */
0 /* Undefined */
};
static unsigned int
compute_trfc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trfc)
{
unsigned int trfc_ps;
trfc_ps = (((trctrfc_ext & 0x1) * 256) + trfc) * 1000
+ byte40_table_ps[(trctrfc_ext >> 1) & 0x7];
return trfc_ps;
}
static unsigned int
compute_trc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trc)
{
unsigned int trc_ps;
trc_ps = trc * 1000 + byte40_table_ps[(trctrfc_ext >> 4) & 0x7];
return trc_ps;
}
/*
* tCKmax from DDR I SPD Byte 43
*
* Bits 7:2 == whole ns
* Bits 1:0 == quarter ns
* 00 == 0.00 ns
* 01 == 0.25 ns
* 10 == 0.50 ns
* 11 == 0.75 ns
*
* Returns picoseconds.
*/
static unsigned int
compute_tckmax_from_spd_ps(unsigned int byte43)
{
return (byte43 >> 2) * 1000 + (byte43 & 0x3) * 250;
}
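/*
* Worked example with an illustrative value: byte 43 = 0x1e gives
* (0x1e >> 2) = 7 whole ns and (0x1e & 0x3) = 2 quarter-ns steps,
* i.e. 7000 + 500 = 7500 ps, so tCKmax = 7.5 ns.
*/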
/*
* Determine Refresh Rate. Ignore self refresh bit on DDR I.
* Table from SPD Spec, Byte 12, converted to picoseconds and
* filled in with "default" normal values.
*/
static unsigned int
determine_refresh_rate_ps(const unsigned int spd_refresh)
{
unsigned int refresh_time_ps[8] = {
15625000, /* 0 Normal 1.00x */
3900000, /* 1 Reduced .25x */
7800000, /* 2 Extended .50x */
31300000, /* 3 Extended 2.00x */
62500000, /* 4 Extended 4.00x */
125000000, /* 5 Extended 8.00x */
15625000, /* 6 Normal 1.00x filler */
15625000, /* 7 Normal 1.00x filler */
};
return refresh_time_ps[spd_refresh & 0x7];
}
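/*
* Worked example with an illustrative value: spd_refresh = 0x82 masks
* to rate code 2, i.e. 7800000 ps (7.8 us, the "Extended .50x" rate);
* the self-refresh bit (0x80) is simply ignored here.
*/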
/*
* The purpose of this function is to compute a suitable
* CAS latency given the DRAM clock period. The SPD only
* defines at most 3 CAS latencies. Typically the slower in
* frequency the DIMM runs at, the shorter its CAS latency can be.
* If the DIMM is operating at a sufficiently low frequency,
* it may be able to run at a CAS latency shorter than the
* shortest SPD-defined CAS latency.
*
* If a CAS latency is not found, 0 is returned.
*
* Do this by finding in the standard speed bin table the longest
* tCKmin that doesn't exceed the value of mclk_ps (tCK).
*
* An assumption made is that the SDRAM device allows the
* CL to be programmed for a value that is lower than those
* advertised by the SPD. This is not always the case,
* as those modes not defined in the SPD are optional.
*
* CAS latency de-rating based upon values in JEDEC Standard No. 79-E
* Table 11.
*
* ordinal 2, ddr1_speed_bins[1] contains tCK for CL=2
*/
/* CL2.0 CL2.5 CL3.0 */
unsigned short ddr1_speed_bins[] = {0, 7500, 6000, 5000 };
unsigned int
compute_derated_DDR1_CAS_latency(unsigned int mclk_ps)
{
const unsigned int num_speed_bins = ARRAY_SIZE(ddr1_speed_bins);
unsigned int lowest_tCKmin_found = 0;
unsigned int lowest_tCKmin_CL = 0;
unsigned int i;
debug("mclk_ps = %u\n", mclk_ps);
for (i = 0; i < num_speed_bins; i++) {
unsigned int x = ddr1_speed_bins[i];
debug("i=%u, x = %u, lowest_tCKmin_found = %u\n",
i, x, lowest_tCKmin_found);
if (x && lowest_tCKmin_found <= x && x <= mclk_ps) {
lowest_tCKmin_found = x;
lowest_tCKmin_CL = i + 1;
}
}
debug("lowest_tCKmin_CL = %u\n", lowest_tCKmin_CL);
return lowest_tCKmin_CL;
}
/*
* ddr_compute_dimm_parameters for DDR1 SPD
*
* Compute DIMM parameters based upon the SPD information in spd.
* Writes the results to the dimm_params_t structure pointed by pdimm.
*
* FIXME: use #define for the retvals
*/
unsigned int
ddr_compute_dimm_parameters(const ddr1_spd_eeprom_t *spd,
dimm_params_t *pdimm,
unsigned int dimm_number)
{
unsigned int retval;
if (spd->mem_type) {
if (spd->mem_type != SPD_MEMTYPE_DDR) {
printf("DIMM %u: is not a DDR1 SPD.\n", dimm_number);
return 1;
}
} else {
memset(pdimm, 0, sizeof(dimm_params_t));
return 1;
}
retval = ddr1_spd_check(spd);
if (retval) {
printf("DIMM %u: failed checksum\n", dimm_number);
return 2;
}
/*
* The part name in ASCII in the SPD EEPROM is not null terminated.
* Guarantee null termination here by presetting all bytes to 0
* and copying the part name in ASCII from the SPD onto it
*/
memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
/* DIMM organization parameters */
pdimm->n_ranks = spd->nrows;
pdimm->rank_density = compute_ranksize(spd->mem_type, spd->bank_dens);
pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
pdimm->data_width = spd->dataw_lsb;
pdimm->primary_sdram_width = spd->primw;
pdimm->ec_sdram_width = spd->ecw;
/*
* FIXME: Need to determine registered_dimm status.
* 1 == register buffered
* 0 == unbuffered
*/
pdimm->registered_dimm = 0; /* unbuffered */
/* SDRAM device parameters */
pdimm->n_row_addr = spd->nrow_addr;
pdimm->n_col_addr = spd->ncol_addr;
pdimm->n_banks_per_sdram_device = spd->nbanks;
pdimm->edc_config = spd->config;
pdimm->burst_lengths_bitmask = spd->burstl;
pdimm->row_density = spd->bank_dens;
/*
* Calculate the Maximum Data Rate based on the Minimum Cycle time.
* The SPD clk_cycle field (tCKmin) is measured in tenths of
* nanoseconds and represented as BCD.
*/
pdimm->tCKmin_X_ps
= convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle);
pdimm->tCKmin_X_minus_1_ps
= convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle2);
pdimm->tCKmin_X_minus_2_ps
= convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle3);
pdimm->tCKmax_ps = compute_tckmax_from_spd_ps(spd->tckmax);
/*
* Compute CAS latencies defined by SPD
* The SPD caslat_X should have at least 1 and at most 3 bits set.
*
* If cas_lat after masking is 0, the __ilog2 function returns
* 255 into the variable. This behavior is abused once.
*/
pdimm->caslat_X = __ilog2(spd->cas_lat);
pdimm->caslat_X_minus_1 = __ilog2(spd->cas_lat
& ~(1 << pdimm->caslat_X));
pdimm->caslat_X_minus_2 = __ilog2(spd->cas_lat
& ~(1 << pdimm->caslat_X)
& ~(1 << pdimm->caslat_X_minus_1));
/* Compute CAS latencies below that defined by SPD */
pdimm->caslat_lowest_derated
= compute_derated_DDR1_CAS_latency(get_memory_clk_period_ps());
/* Compute timing parameters */
pdimm->tRCD_ps = spd->trcd * 250;
pdimm->tRP_ps = spd->trp * 250;
pdimm->tRAS_ps = spd->tras * 1000;
pdimm->tWR_ps = mclk_to_picos(3);
pdimm->tWTR_ps = mclk_to_picos(1);
pdimm->tRFC_ps = compute_trfc_ps_from_spd(0, spd->trfc);
pdimm->tRRD_ps = spd->trrd * 250;
pdimm->tRC_ps = compute_trc_ps_from_spd(0, spd->trc);
pdimm->refresh_rate_ps = determine_refresh_rate_ps(spd->refresh);
pdimm->tIS_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_setup);
pdimm->tIH_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_hold);
pdimm->tDS_ps
= convert_bcd_hundredths_to_cycle_time_ps(spd->data_setup);
pdimm->tDH_ps
= convert_bcd_hundredths_to_cycle_time_ps(spd->data_hold);
pdimm->tRTP_ps = mclk_to_picos(2); /* By the book. */
pdimm->tDQSQ_max_ps = spd->tdqsq * 10;
pdimm->tQHS_ps = spd->tqhs * 10;
return 0;
}


@@ -0,0 +1,339 @@
/*
* Copyright 2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* Version 2 as published by the Free Software Foundation.
*/
#include <common.h>
#include <asm/fsl_ddr_sdram.h>
#include "ddr.h"
/*
* Calculate the Density of each Physical Rank.
* Returned size is in bytes.
*
* Study this table from Byte 31 of the JEDEC SPD Spec.
*
* DDR I DDR II
* Bit Size Size
* --- ----- ------
* 7 high 512MB 512MB
* 6 256MB 256MB
* 5 128MB 128MB
* 4 64MB 16GB
* 3 32MB 8GB
* 2 16MB 4GB
* 1 2GB 2GB
* 0 low 1GB 1GB
*
* Reorder Table to be linear by stripping the bottom
* 2 or 5 bits off and shifting them up to the top.
*
*/
static unsigned long long
compute_ranksize(unsigned int mem_type, unsigned char row_dens)
{
unsigned long long bsize;
/* Bottom 5 bits up to the top. */
bsize = ((row_dens >> 5) | ((row_dens & 31) << 3));
bsize <<= 27ULL;
debug("DDR: DDR II rank density = 0x%08x\n", bsize);
return bsize;
}
/*
* Convert a two-nibble BCD value into a cycle time.
* While the spec calls for nano-seconds, picos are returned.
*
* This implements the tables for bytes 9, 23 and 25 for both
* DDR I and II. No allowance for distinguishing the invalid
* fields absent for DDR I yet present in DDR II is made.
* (That is, cycle times of .25, .33, .66 and .75 ns are
* allowed for both DDR II and I.)
*/
static unsigned int
convert_bcd_tenths_to_cycle_time_ps(unsigned int spd_val)
{
/* Table look up the lower nibble, allow DDR I & II. */
unsigned int tenths_ps[16] = {
0,
100,
200,
300,
400,
500,
600,
700,
800,
900,
250, /* This and the next 3 entries valid ... */
330, /* ... only for tCK calculations. */
660,
750,
0, /* undefined */
0 /* undefined */
};
unsigned int whole_ns = (spd_val & 0xF0) >> 4;
unsigned int tenth_ns = spd_val & 0x0F;
unsigned int ps = whole_ns * 1000 + tenths_ps[tenth_ns];
return ps;
}
static unsigned int
convert_bcd_hundredths_to_cycle_time_ps(unsigned int spd_val)
{
unsigned int tenth_ns = (spd_val & 0xF0) >> 4;
unsigned int hundredth_ns = spd_val & 0x0F;
unsigned int ps = tenth_ns * 100 + hundredth_ns * 10;
return ps;
}
static unsigned int byte40_table_ps[8] = {
0,
250,
330,
500,
660,
750,
0, /* supposed to be RFC, but not sure what that means */
0 /* Undefined */
};
static unsigned int
compute_trfc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trfc)
{
unsigned int trfc_ps;
trfc_ps = (((trctrfc_ext & 0x1) * 256) + trfc) * 1000
+ byte40_table_ps[(trctrfc_ext >> 1) & 0x7];
return trfc_ps;
}
static unsigned int
compute_trc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trc)
{
unsigned int trc_ps;
trc_ps = trc * 1000 + byte40_table_ps[(trctrfc_ext >> 4) & 0x7];
return trc_ps;
}
/*
* Determine Refresh Rate. Ignore self refresh bit on DDR I.
* Table from SPD Spec, Byte 12, converted to picoseconds and
* filled in with "default" normal values.
*/
static unsigned int
determine_refresh_rate_ps(const unsigned int spd_refresh)
{
unsigned int refresh_time_ps[8] = {
15625000, /* 0 Normal 1.00x */
3900000, /* 1 Reduced .25x */
7800000, /* 2 Extended .50x */
31300000, /* 3 Extended 2.00x */
62500000, /* 4 Extended 4.00x */
125000000, /* 5 Extended 8.00x */
15625000, /* 6 Normal 1.00x filler */
15625000, /* 7 Normal 1.00x filler */
};
return refresh_time_ps[spd_refresh & 0x7];
}
/*
* The purpose of this function is to compute a suitable
* CAS latency given the DRAM clock period. The SPD only
* defines at most 3 CAS latencies. Typically the slower in
* frequency the DIMM runs at, the shorter its CAS latency can
* be. If the DIMM is operating at a sufficiently low frequency,
* it may be able to run at a CAS latency shorter than the
* shortest SPD-defined CAS latency.
*
* If a CAS latency is not found, 0 is returned.
*
* Do this by finding in the standard speed bin table the longest
* tCKmin that doesn't exceed the value of mclk_ps (tCK).
*
* An assumption made is that the SDRAM device allows the
* CL to be programmed for a value that is lower than those
* advertised by the SPD. This is not always the case,
* as those modes not defined in the SPD are optional.
*
* CAS latency de-rating based upon values in JEDEC Standard No. 79-2C
* Table 40, "DDR2 SDRAM standard speed bins and tCK, tRCD, tRP, tRAS,
* and tRC for corresponding bin"
*
* ordinal 2, ddr2_speed_bins[1] contains tCK for CL=3
* Not certain if any good value exists for CL=2
*/
/* CL2 CL3 CL4 CL5 CL6 */
unsigned short ddr2_speed_bins[] = { 0, 5000, 3750, 3000, 2500 };
unsigned int
compute_derated_DDR2_CAS_latency(unsigned int mclk_ps)
{
const unsigned int num_speed_bins = ARRAY_SIZE(ddr2_speed_bins);
unsigned int lowest_tCKmin_found = 0;
unsigned int lowest_tCKmin_CL = 0;
unsigned int i;
debug("mclk_ps = %u\n", mclk_ps);
for (i = 0; i < num_speed_bins; i++) {
unsigned int x = ddr2_speed_bins[i];
debug("i=%u, x = %u, lowest_tCKmin_found = %u\n",
i, x, lowest_tCKmin_found);
if (x && x <= mclk_ps && x >= lowest_tCKmin_found ) {
lowest_tCKmin_found = x;
lowest_tCKmin_CL = i + 2;
}
}
debug("lowest_tCKmin_CL = %u\n", lowest_tCKmin_CL);
return lowest_tCKmin_CL;
}
/*
* ddr_compute_dimm_parameters for DDR2 SPD
*
* Compute DIMM parameters based upon the SPD information in spd.
* Writes the results to the dimm_params_t structure pointed by pdimm.
*
* FIXME: use #define for the retvals
*/
unsigned int
ddr_compute_dimm_parameters(const ddr2_spd_eeprom_t *spd,
dimm_params_t *pdimm,
unsigned int dimm_number)
{
unsigned int retval;
if (spd->mem_type) {
if (spd->mem_type != SPD_MEMTYPE_DDR2) {
printf("DIMM %u: is not a DDR2 SPD.\n", dimm_number);
return 1;
}
} else {
memset(pdimm, 0, sizeof(dimm_params_t));
return 1;
}
retval = ddr2_spd_check(spd);
if (retval) {
printf("DIMM %u: failed checksum\n", dimm_number);
return 2;
}
/*
* The part name in ASCII in the SPD EEPROM is not null terminated.
* Guarantee null termination here by presetting all bytes to 0
* and copying the part name in ASCII from the SPD onto it
*/
memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
/* DIMM organization parameters */
pdimm->n_ranks = (spd->mod_ranks & 0x7) + 1;
pdimm->rank_density = compute_ranksize(spd->mem_type, spd->rank_dens);
pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
pdimm->data_width = spd->dataw;
pdimm->primary_sdram_width = spd->primw;
pdimm->ec_sdram_width = spd->ecw;
/* FIXME: what about registered SO-DIMM? */
switch (spd->dimm_type) {
case 0x01: /* RDIMM */
case 0x10: /* Mini-RDIMM */
pdimm->registered_dimm = 1; /* register buffered */
break;
case 0x02: /* UDIMM */
case 0x04: /* SO-DIMM */
case 0x08: /* Micro-DIMM */
case 0x20: /* Mini-UDIMM */
pdimm->registered_dimm = 0; /* unbuffered */
break;
default:
printf("unknown dimm_type 0x%02X\n", spd->dimm_type);
return 1;
break;
}
/* SDRAM device parameters */
pdimm->n_row_addr = spd->nrow_addr;
pdimm->n_col_addr = spd->ncol_addr;
pdimm->n_banks_per_sdram_device = spd->nbanks;
pdimm->edc_config = spd->config;
pdimm->burst_lengths_bitmask = spd->burstl;
pdimm->row_density = spd->rank_dens;
/*
* Calculate the Maximum Data Rate based on the Minimum Cycle time.
* The SPD clk_cycle field (tCKmin) is measured in tenths of
* nanoseconds and represented as BCD.
*/
pdimm->tCKmin_X_ps
= convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle);
pdimm->tCKmin_X_minus_1_ps
= convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle2);
pdimm->tCKmin_X_minus_2_ps
= convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle3);
pdimm->tCKmax_ps = convert_bcd_tenths_to_cycle_time_ps(spd->tckmax);
/*
* Compute CAS latencies defined by SPD
* The SPD caslat_X should have at least 1 and at most 3 bits set.
*
* If cas_lat after masking is 0, the __ilog2 function returns
* 255 into the variable. This behavior is abused once.
*/
pdimm->caslat_X = __ilog2(spd->cas_lat);
pdimm->caslat_X_minus_1 = __ilog2(spd->cas_lat
& ~(1 << pdimm->caslat_X));
pdimm->caslat_X_minus_2 = __ilog2(spd->cas_lat
& ~(1 << pdimm->caslat_X)
& ~(1 << pdimm->caslat_X_minus_1));
/* Compute CAS latencies below that defined by SPD */
pdimm->caslat_lowest_derated
= compute_derated_DDR2_CAS_latency(get_memory_clk_period_ps());
/* Compute timing parameters */
pdimm->tRCD_ps = spd->trcd * 250;
pdimm->tRP_ps = spd->trp * 250;
pdimm->tRAS_ps = spd->tras * 1000;
pdimm->tWR_ps = spd->twr * 250;
pdimm->tWTR_ps = spd->twtr * 250;
pdimm->tRFC_ps = compute_trfc_ps_from_spd(spd->trctrfc_ext, spd->trfc);
pdimm->tRRD_ps = spd->trrd * 250;
pdimm->tRC_ps = compute_trc_ps_from_spd(spd->trctrfc_ext, spd->trc);
pdimm->refresh_rate_ps = determine_refresh_rate_ps(spd->refresh);
pdimm->tIS_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_setup);
pdimm->tIH_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_hold);
pdimm->tDS_ps
= convert_bcd_hundredths_to_cycle_time_ps(spd->data_setup);
pdimm->tDH_ps
= convert_bcd_hundredths_to_cycle_time_ps(spd->data_hold);
pdimm->tRTP_ps = spd->trtp * 250;
pdimm->tDQSQ_max_ps = spd->tdqsq * 10;
pdimm->tQHS_ps = spd->tqhs * 10;
return 0;
}


@@ -0,0 +1,314 @@
/*
* Copyright 2008-2009 Freescale Semiconductor, Inc.
* Dave Liu <daveliu@freescale.com>
*
* Calculate the organization and timing parameters
* from the DDR3 SPD; please refer to the spec
* JEDEC standard No.21-C 4_01_02_11R18.pdf
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* Version 2 as published by the Free Software Foundation.
*/
#include <common.h>
#include <asm/fsl_ddr_sdram.h>
#include "ddr.h"
/*
* Calculate the Density of each Physical Rank.
* Returned size is in bytes.
*
* each rank size =
* sdram capacity(bit) / 8 * primary bus width / sdram width
*
* where: sdram capacity = spd byte4[3:0]
* primary bus width = spd byte8[2:0]
* sdram width = spd byte7[2:0]
*
* SPD byte4 - sdram density and banks
* bit[3:0] size(bit) size(byte)
* 0000 256Mb 32MB
* 0001 512Mb 64MB
* 0010 1Gb 128MB
* 0011 2Gb 256MB
* 0100 4Gb 512MB
* 0101 8Gb 1GB
* 0110 16Gb 2GB
*
* SPD byte8 - module memory bus width
* bit[2:0] primary bus width
* 000 8bits
* 001 16bits
* 010 32bits
* 011 64bits
*
* SPD byte7 - module organization
* bit[2:0] sdram device width
* 000 4bits
* 001 8bits
* 010 16bits
* 011 32bits
*
*/
static unsigned long long
compute_ranksize(const ddr3_spd_eeprom_t *spd)
{
unsigned long long bsize;
int nbit_sdram_cap_bsize = 0;
int nbit_primary_bus_width = 0;
int nbit_sdram_width = 0;
if ((spd->density_banks & 0xf) < 7)
nbit_sdram_cap_bsize = (spd->density_banks & 0xf) + 28;
if ((spd->bus_width & 0x7) < 4)
nbit_primary_bus_width = (spd->bus_width & 0x7) + 3;
if ((spd->organization & 0x7) < 4)
nbit_sdram_width = (spd->organization & 0x7) + 2;
bsize = 1ULL << (nbit_sdram_cap_bsize - 3
+ nbit_primary_bus_width - nbit_sdram_width);
debug("DDR: DDR III rank density = 0x%16lx\n", bsize);
return bsize;
}
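/*
* Worked example with illustrative SPD values: density_banks low
* nibble = 0x3 (2 Gb devices), bus_width low bits = 0x3 (64-bit
* primary bus) and organization low bits = 0x1 (x8 devices) give
* 1ULL << (31 - 3 + 6 - 3) = 2 GB per rank.
*/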
/*
* ddr_compute_dimm_parameters for DDR3 SPD
*
* Compute DIMM parameters based upon the SPD information in spd.
* Writes the results to the dimm_params_t structure pointed by pdimm.
*
*/
unsigned int
ddr_compute_dimm_parameters(const ddr3_spd_eeprom_t *spd,
dimm_params_t *pdimm,
unsigned int dimm_number)
{
unsigned int retval;
unsigned int mtb_ps;
if (spd->mem_type) {
if (spd->mem_type != SPD_MEMTYPE_DDR3) {
printf("DIMM %u: is not a DDR3 SPD.\n", dimm_number);
return 1;
}
} else {
memset(pdimm, 0, sizeof(dimm_params_t));
return 1;
}
retval = ddr3_spd_check(spd);
if (retval) {
printf("DIMM %u: failed checksum\n", dimm_number);
return 2;
}
/*
* The part name in ASCII in the SPD EEPROM is not null terminated.
* Guarantee null termination here by presetting all bytes to 0
* and copying the part name in ASCII from the SPD onto it
*/
memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
/* DIMM organization parameters */
pdimm->n_ranks = ((spd->organization >> 3) & 0x7) + 1;
pdimm->rank_density = compute_ranksize(spd);
pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
pdimm->primary_sdram_width = 1 << (3 + (spd->bus_width & 0x7));
if ((spd->bus_width >> 3) & 0x3)
pdimm->ec_sdram_width = 8;
else
pdimm->ec_sdram_width = 0;
pdimm->data_width = pdimm->primary_sdram_width
+ pdimm->ec_sdram_width;
switch (spd->module_type & 0xf) {
case 0x01: /* RDIMM */
case 0x05: /* Mini-RDIMM */
pdimm->registered_dimm = 1; /* register buffered */
break;
case 0x02: /* UDIMM */
case 0x03: /* SO-DIMM */
case 0x04: /* Micro-DIMM */
case 0x06: /* Mini-UDIMM */
pdimm->registered_dimm = 0; /* unbuffered */
break;
default:
printf("unknown dimm_type 0x%02X\n", spd->module_type);
return 1;
}
/* SDRAM device parameters */
pdimm->n_row_addr = ((spd->addressing >> 3) & 0x7) + 12;
pdimm->n_col_addr = (spd->addressing & 0x7) + 9;
pdimm->n_banks_per_sdram_device = 8 << ((spd->density_banks >> 4) & 0x7);
/*
* The SPD spec does not have an ECC bit;
* we consider the DIMM to be ECC-capable
* when the extension bus exists.
*/
if (pdimm->ec_sdram_width)
pdimm->edc_config = 0x02;
else
pdimm->edc_config = 0x00;
/*
* The SPD spec does not have a burst length byte,
* but the DDR3 spec has native BL8 and BC4:
* BL8 - bit 3, BC4 - bit 2
*/
pdimm->burst_lengths_bitmask = 0x0c;
pdimm->row_density = __ilog2(pdimm->rank_density);
/* MTB - medium timebase
* The unit in the SPD spec is ns,
* We convert it to ps.
* eg: MTB = 0.125ns (125ps)
*/
mtb_ps = (spd->mtb_dividend * 1000) /spd->mtb_divisor;
pdimm->mtb_ps = mtb_ps;
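/*
* Worked example with typical DDR3 SPD values: mtb_dividend = 1 and
* mtb_divisor = 8 give (1 * 1000) / 8 = 125 ps, i.e. the 0.125 ns
* medium timebase assumed in the examples below.
*/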
/*
* sdram minimum cycle time
* we assume the MTB is 0.125ns
* eg:
* tCK_min=15 MTB (1.875ns) ->DDR3-1066
* =12 MTB (1.5ns) ->DDR3-1333
* =10 MTB (1.25ns) ->DDR3-1600
*/
pdimm->tCKmin_X_ps = spd->tCK_min * mtb_ps;
/*
* CAS latency supported
* bit4 - CL4
* bit5 - CL5
* bit18 - CL18
*/
pdimm->caslat_X = ((spd->caslat_msb << 8) | spd->caslat_lsb) << 4;
/*
* min CAS latency time
* eg: tAA_min =
* DDR3-800D 100 MTB (12.5ns)
* DDR3-1066F 105 MTB (13.125ns)
* DDR3-1333H 108 MTB (13.5ns)
* DDR3-1600H 90 MTB (11.25ns)
*/
pdimm->tAA_ps = spd->tAA_min * mtb_ps;
/*
* min write recovery time
* eg:
* tWR_min = 120 MTB (15ns) -> all speed grades.
*/
pdimm->tWR_ps = spd->tWR_min * mtb_ps;
/*
* min RAS to CAS delay time
* eg: tRCD_min =
* DDR3-800 100 MTB (12.5ns)
* DDR3-1066F 105 MTB (13.125ns)
* DDR3-1333H 108 MTB (13.5ns)
* DDR3-1600H 90 MTB (11.25)
*/
pdimm->tRCD_ps = spd->tRCD_min * mtb_ps;
/*
* min row active to row active delay time
* eg: tRRD_min =
* DDR3-800(1KB page) 80 MTB (10ns)
* DDR3-1333(1KB page) 48 MTB (6ns)
*/
pdimm->tRRD_ps = spd->tRRD_min * mtb_ps;
/*
* min row precharge delay time
* eg: tRP_min =
* DDR3-800D 100 MTB (12.5ns)
* DDR3-1066F 105 MTB (13.125ns)
* DDR3-1333H 108 MTB (13.5ns)
* DDR3-1600H 90 MTB (11.25ns)
*/
pdimm->tRP_ps = spd->tRP_min * mtb_ps;
/* min active to precharge delay time
* eg: tRAS_min =
* DDR3-800D 300 MTB (37.5ns)
* DDR3-1066F 300 MTB (37.5ns)
* DDR3-1333H 288 MTB (36ns)
* DDR3-1600H 280 MTB (35ns)
*/
pdimm->tRAS_ps = (((spd->tRAS_tRC_ext & 0xf) << 8) | spd->tRAS_min_lsb)
* mtb_ps;
/*
* min active to active/refresh delay time
* eg: tRC_min =
* DDR3-800D 400 MTB (50ns)
* DDR3-1066F 405 MTB (50.625ns)
* DDR3-1333H 396 MTB (49.5ns)
* DDR3-1600H 370 MTB (46.25ns)
*/
pdimm->tRC_ps = (((spd->tRAS_tRC_ext & 0xf0) << 4) | spd->tRC_min_lsb)
* mtb_ps;
/*
* min refresh recovery delay time
* eg: tRFC_min =
* 512Mb 720 MTB (90ns)
* 1Gb 880 MTB (110ns)
* 2Gb 1280 MTB (160ns)
*/
pdimm->tRFC_ps = ((spd->tRFC_min_msb << 8) | spd->tRFC_min_lsb)
* mtb_ps;
/*
* min internal write to read command delay time
* eg: tWTR_min = 40 MTB (7.5ns) - all speed bins.
* tWTR is at least 4 mclk independent of operating freq.
*/
pdimm->tWTR_ps = spd->tWTR_min * mtb_ps;
/*
* min internal read to precharge command delay time
* eg: tRTP_min = 40 MTB (7.5ns) - all speed bins.
* tRTP is at least 4 mclk independent of operating freq.
*/
pdimm->tRTP_ps = spd->tRTP_min * mtb_ps;
/*
* Average periodic refresh interval
* tREFI = 7.8 us at normal temperature range
* = 3.9 us at ext temperature range
*/
pdimm->refresh_rate_ps = 7800000;
/*
* min four active window delay time
* eg: tFAW_min =
* DDR3-800(1KB page) 320 MTB (40ns)
* DDR3-1066(1KB page) 300 MTB (37.5ns)
* DDR3-1333(1KB page) 240 MTB (30ns)
* DDR3-1600(1KB page) 240 MTB (30ns)
*/
pdimm->tFAW_ps = (((spd->tFAW_msb & 0xf) << 8) | spd->tFAW_min)
* mtb_ps;
/*
* We need to check address mirroring for unbuffered DIMMs.
* If the SPD indicates a mirrored address map, the DDR
* controller needs to take care of it.
*/
if ((spd->module_type == SPD_MODULETYPE_UDIMM) ||
(spd->module_type == SPD_MODULETYPE_SODIMM) ||
(spd->module_type == SPD_MODULETYPE_MICRODIMM) ||
(spd->module_type == SPD_MODULETYPE_MINIUDIMM))
pdimm->mirrored_dimm = spd->mod_section.unbuffered.addr_mapping & 0x1;
return 0;
}


@@ -0,0 +1,468 @@
/*
* Copyright 2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* Version 2 as published by the Free Software Foundation.
*/
#include <common.h>
#include <asm/fsl_ddr_sdram.h>
#include "ddr.h"
unsigned int
compute_cas_latency_ddr3(const dimm_params_t *dimm_params,
common_timing_params_t *outpdimm,
unsigned int number_of_dimms)
{
unsigned int i;
unsigned int tAAmin_ps = 0;
unsigned int tCKmin_X_ps = 0;
unsigned int common_caslat;
unsigned int caslat_actual;
unsigned int retry = 16;
unsigned int tmp;
const unsigned int mclk_ps = get_memory_clk_period_ps();
/* compute the common CAS latency supported between slots */
tmp = dimm_params[0].caslat_X;
for (i = 1; i < number_of_dimms; i++)
tmp &= dimm_params[i].caslat_X;
common_caslat = tmp;
/* compute the max tAAmin tCKmin between slots */
for (i = 0; i < number_of_dimms; i++) {
tAAmin_ps = max(tAAmin_ps, dimm_params[i].tAA_ps);
tCKmin_X_ps = max(tCKmin_X_ps, dimm_params[i].tCKmin_X_ps);
}
/* validate if the memory clk is in the range of dimms */
if (mclk_ps < tCKmin_X_ps) {
printf("The DIMM max tCKmin is %d ps,"
"doesn't support the MCLK cycle %d ps\n",
tCKmin_X_ps, mclk_ps);
return 1;
}
/* determine the actual CAS latency */
caslat_actual = (tAAmin_ps + mclk_ps - 1) / mclk_ps;
/* check if the dimms support the CAS latency */
while (!(common_caslat & (1 << caslat_actual)) && retry > 0) {
caslat_actual++;
retry--;
}
/* once the calculation of caslat_actual is completed,
* we must verify that this CAS latency value does not
* exceed tAAmax, which is 20 ns for all DDR3 speed grades
*/
if (caslat_actual * mclk_ps > 20000) {
printf("The choosen cas latency %d is too large\n",
caslat_actual);
return 1;
}
outpdimm->lowest_common_SPD_caslat = caslat_actual;
return 0;
}
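/*
* Worked example with illustrative numbers: a DIMM with
* tAA_min = 13125 ps (105 MTB) running at mclk_ps = 1875 ps
* (DDR3-1066) gives caslat_actual = (13125 + 1875 - 1) / 1875 = 7;
* if CL7 is not in the common CAS latency mask, the retry loop above
* bumps it up, and 7 * 1875 = 13125 ps stays under the 20 ns tAAmax
* bound.
*/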
/*
* compute_lowest_common_dimm_parameters()
*
* Determine the worst-case DIMM timing parameters from the set of DIMMs
* whose parameters have been computed into the array pointed to
* by dimm_params.
*/
unsigned int
compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
common_timing_params_t *outpdimm,
unsigned int number_of_dimms)
{
unsigned int i;
unsigned int tCKmin_X_ps = 0;
unsigned int tCKmax_ps = 0xFFFFFFFF;
unsigned int tCKmax_max_ps = 0;
unsigned int tRCD_ps = 0;
unsigned int tRP_ps = 0;
unsigned int tRAS_ps = 0;
unsigned int tWR_ps = 0;
unsigned int tWTR_ps = 0;
unsigned int tRFC_ps = 0;
unsigned int tRRD_ps = 0;
unsigned int tRC_ps = 0;
unsigned int refresh_rate_ps = 0;
unsigned int tIS_ps = 0;
unsigned int tIH_ps = 0;
unsigned int tDS_ps = 0;
unsigned int tDH_ps = 0;
unsigned int tRTP_ps = 0;
unsigned int tDQSQ_max_ps = 0;
unsigned int tQHS_ps = 0;
unsigned int temp1, temp2;
unsigned int additive_latency = 0;
#if !defined(CONFIG_FSL_DDR3)
const unsigned int mclk_ps = get_memory_clk_period_ps();
unsigned int lowest_good_caslat;
unsigned int not_ok;
debug("using mclk_ps = %u\n", mclk_ps);
#endif
temp1 = 0;
for (i = 0; i < number_of_dimms; i++) {
/*
* If there are no ranks on this DIMM,
* it probably doesn't exist, so skip it.
*/
if (dimm_params[i].n_ranks == 0) {
temp1++;
continue;
}
/*
* Find minimum tCKmax_ps to find fastest slow speed,
* i.e., this is the slowest the whole system can go.
*/
tCKmax_ps = min(tCKmax_ps, dimm_params[i].tCKmax_ps);
/* Either find maximum value to determine slowest
* speed, delay, time, period, etc */
tCKmin_X_ps = max(tCKmin_X_ps, dimm_params[i].tCKmin_X_ps);
tCKmax_max_ps = max(tCKmax_max_ps, dimm_params[i].tCKmax_ps);
tRCD_ps = max(tRCD_ps, dimm_params[i].tRCD_ps);
tRP_ps = max(tRP_ps, dimm_params[i].tRP_ps);
tRAS_ps = max(tRAS_ps, dimm_params[i].tRAS_ps);
tWR_ps = max(tWR_ps, dimm_params[i].tWR_ps);
tWTR_ps = max(tWTR_ps, dimm_params[i].tWTR_ps);
tRFC_ps = max(tRFC_ps, dimm_params[i].tRFC_ps);
tRRD_ps = max(tRRD_ps, dimm_params[i].tRRD_ps);
tRC_ps = max(tRC_ps, dimm_params[i].tRC_ps);
tIS_ps = max(tIS_ps, dimm_params[i].tIS_ps);
tIH_ps = max(tIH_ps, dimm_params[i].tIH_ps);
tDS_ps = max(tDS_ps, dimm_params[i].tDS_ps);
tDH_ps = max(tDH_ps, dimm_params[i].tDH_ps);
tRTP_ps = max(tRTP_ps, dimm_params[i].tRTP_ps);
tQHS_ps = max(tQHS_ps, dimm_params[i].tQHS_ps);
refresh_rate_ps = max(refresh_rate_ps,
dimm_params[i].refresh_rate_ps);
/*
* Find maximum tDQSQ_max_ps to find slowest.
*
* FIXME: is finding the slowest value the correct
* strategy for this parameter?
*/
tDQSQ_max_ps = max(tDQSQ_max_ps, dimm_params[i].tDQSQ_max_ps);
}
outpdimm->ndimms_present = number_of_dimms - temp1;
if (temp1 == number_of_dimms) {
debug("no dimms this memory controller\n");
return 0;
}
outpdimm->tCKmin_X_ps = tCKmin_X_ps;
outpdimm->tCKmax_ps = tCKmax_ps;
outpdimm->tCKmax_max_ps = tCKmax_max_ps;
outpdimm->tRCD_ps = tRCD_ps;
outpdimm->tRP_ps = tRP_ps;
outpdimm->tRAS_ps = tRAS_ps;
outpdimm->tWR_ps = tWR_ps;
outpdimm->tWTR_ps = tWTR_ps;
outpdimm->tRFC_ps = tRFC_ps;
outpdimm->tRRD_ps = tRRD_ps;
outpdimm->tRC_ps = tRC_ps;
outpdimm->refresh_rate_ps = refresh_rate_ps;
outpdimm->tIS_ps = tIS_ps;
outpdimm->tIH_ps = tIH_ps;
outpdimm->tDS_ps = tDS_ps;
outpdimm->tDH_ps = tDH_ps;
outpdimm->tRTP_ps = tRTP_ps;
outpdimm->tDQSQ_max_ps = tDQSQ_max_ps;
outpdimm->tQHS_ps = tQHS_ps;
/* Determine common burst length for all DIMMs. */
temp1 = 0xff;
for (i = 0; i < number_of_dimms; i++) {
if (dimm_params[i].n_ranks) {
temp1 &= dimm_params[i].burst_lengths_bitmask;
}
}
outpdimm->all_DIMMs_burst_lengths_bitmask = temp1;
/* Determine if all DIMMs are registered (buffered). */
temp1 = temp2 = 0;
for (i = 0; i < number_of_dimms; i++) {
if (dimm_params[i].n_ranks) {
if (dimm_params[i].registered_dimm)
temp1 = 1;
if (!dimm_params[i].registered_dimm)
temp2 = 1;
}
}
outpdimm->all_DIMMs_registered = 0;
if (temp1 && !temp2) {
outpdimm->all_DIMMs_registered = 1;
}
outpdimm->all_DIMMs_unbuffered = 0;
if (!temp1 && temp2) {
outpdimm->all_DIMMs_unbuffered = 1;
}
/* CHECKME: */
if (!outpdimm->all_DIMMs_registered
&& !outpdimm->all_DIMMs_unbuffered) {
printf("ERROR: Mix of registered buffered and unbuffered "
"DIMMs detected!\n");
}
#if defined(CONFIG_FSL_DDR3)
if (compute_cas_latency_ddr3(dimm_params, outpdimm, number_of_dimms))
return 1;
#else
/*
* Compute a CAS latency suitable for all DIMMs
*
* Strategy for SPD-defined latencies: compute only
* CAS latency defined by all DIMMs.
*/
/*
* Step 1: find CAS latency common to all DIMMs using bitwise
* operation.
*/
temp1 = 0xFF;
for (i = 0; i < number_of_dimms; i++) {
if (dimm_params[i].n_ranks) {
temp2 = 0;
temp2 |= 1 << dimm_params[i].caslat_X;
temp2 |= 1 << dimm_params[i].caslat_X_minus_1;
temp2 |= 1 << dimm_params[i].caslat_X_minus_2;
/*
* FIXME: If there was no entry for X-2 (X-1) in
* the SPD, then caslat_X_minus_2
* (caslat_X_minus_1) contains either 255 or
* 0xFFFFFFFF because that's what the glorious
* __ilog2 function returns for an input of 0.
* On 32-bit PowerPC, left shift counts with bit
* 26 set (that the value of 255 or 0xFFFFFFFF
* will have), cause the destination register to
* be 0. That is why this works.
*/
temp1 &= temp2;
}
}
/*
* Step 2: check each common CAS latency against tCK of each
* DIMM's SPD.
*/
lowest_good_caslat = 0;
temp2 = 0;
while (temp1) {
not_ok = 0;
temp2 = __ilog2(temp1);
debug("checking common caslat = %u\n", temp2);
/* Check if this CAS latency will work on all DIMMs at tCK. */
for (i = 0; i < number_of_dimms; i++) {
if (!dimm_params[i].n_ranks) {
continue;
}
if (dimm_params[i].caslat_X == temp2) {
if (mclk_ps >= dimm_params[i].tCKmin_X_ps) {
debug("CL = %u ok on DIMM %u at tCK=%u"
" ps with its tCKmin_X_ps of %u\n",
temp2, i, mclk_ps,
dimm_params[i].tCKmin_X_ps);
continue;
} else {
not_ok++;
}
}
if (dimm_params[i].caslat_X_minus_1 == temp2) {
unsigned int tCKmin_X_minus_1_ps
= dimm_params[i].tCKmin_X_minus_1_ps;
if (mclk_ps >= tCKmin_X_minus_1_ps) {
debug("CL = %u ok on DIMM %u at "
"tCK=%u ps with its "
"tCKmin_X_minus_1_ps of %u\n",
temp2, i, mclk_ps,
tCKmin_X_minus_1_ps);
continue;
} else {
not_ok++;
}
}
if (dimm_params[i].caslat_X_minus_2 == temp2) {
unsigned int tCKmin_X_minus_2_ps
= dimm_params[i].tCKmin_X_minus_2_ps;
if (mclk_ps >= tCKmin_X_minus_2_ps) {
debug("CL = %u ok on DIMM %u at "
"tCK=%u ps with its "
"tCKmin_X_minus_2_ps of %u\n",
temp2, i, mclk_ps,
tCKmin_X_minus_2_ps);
continue;
} else {
not_ok++;
}
}
}
if (!not_ok) {
lowest_good_caslat = temp2;
}
temp1 &= ~(1 << temp2);
}
debug("lowest common SPD-defined CAS latency = %u\n",
lowest_good_caslat);
outpdimm->lowest_common_SPD_caslat = lowest_good_caslat;
/*
* Compute a common 'de-rated' CAS latency.
*
* The strategy here is to find the *highest* derated CAS latency
* with the assumption that all of the DIMMs will support a derated
* CAS latency higher than or equal to their lowest derated value.
*/
temp1 = 0;
for (i = 0; i < number_of_dimms; i++) {
temp1 = max(temp1, dimm_params[i].caslat_lowest_derated);
}
outpdimm->highest_common_derated_caslat = temp1;
debug("highest common dereated CAS latency = %u\n", temp1);
#endif /* #if defined(CONFIG_FSL_DDR3) */
/* Determine if all DIMMs ECC capable. */
temp1 = 1;
for (i = 0; i < number_of_dimms; i++) {
if (dimm_params[i].n_ranks && dimm_params[i].edc_config != 2) {
temp1 = 0;
break;
}
}
if (temp1) {
debug("all DIMMs ECC capable\n");
} else {
debug("Warning: not all DIMMs ECC capable, cant enable ECC\n");
}
outpdimm->all_DIMMs_ECC_capable = temp1;
#ifndef CONFIG_FSL_DDR3
/* FIXME: move to somewhere else to validate. */
if (mclk_ps > tCKmax_max_ps) {
printf("Warning: some of the installed DIMMs "
"can not operate this slowly.\n");
return 1;
}
#endif
/*
* Compute additive latency.
*
* For DDR1, additive latency should be 0.
*
* For DDR2, with ODT enabled, use "a value" less than ACTTORW,
* which comes from Trcd, and also note that:
* add_lat + caslat must be >= 4
*
* For DDR3, we use the AL=0
*
* When to use additive latency for DDR2:
*
* I. Because you are using CL=3 and need to do ODT on writes and
* want functionality.
* 1. Are you going to use ODT? (Does your board not have
* additional termination circuitry for DQ, DQS, DQS_,
* DM, RDQS, RDQS_ for x4/x8 configs?)
* 2. If so, is your lowest supported CL going to be 3?
* 3. If so, then you must set AL=1 because
*
* WL >= 3 for ODT on writes
* RL = AL + CL
* WL = RL - 1
* ->
* WL = AL + CL - 1
* AL + CL - 1 >= 3
* AL + CL >= 4
* QED
*
* RL >= 3 for ODT on reads
* RL = AL + CL
*
* Since CL aren't usually less than 2, AL=0 is a minimum,
* so the WL-derived AL should be the -- FIXME?
*
* II. Because you are using auto-precharge globally and want to
* use additive latency (posted CAS) to get more bandwidth.
* 1. Are you going to use auto-precharge mode globally?
*
* Use additive latency and compute AL to be 1 cycle less than
* tRCD, i.e. the READ or WRITE command is in the cycle
* immediately following the ACTIVATE command.
*
* III. Because you feel like it or want to do some sort of
* degraded-performance experiment.
* 1. Do you just want to use additive latency because you feel
* like it?
*
* Validation: AL is less than tRCD, and within the other
* read-to-precharge constraints.
*/
additive_latency = 0;
#if defined(CONFIG_FSL_DDR2)
if (lowest_good_caslat < 4) {
additive_latency = picos_to_mclk(tRCD_ps) - lowest_good_caslat;
if (mclk_to_picos(additive_latency) > tRCD_ps) {
additive_latency = picos_to_mclk(tRCD_ps);
debug("setting additive_latency to %u because it was "
" greater than tRCD_ps\n", additive_latency);
}
}
#elif defined(CONFIG_FSL_DDR3)
/*
* The system will not use the global auto-precharge mode.
* However, it uses the page mode, so we set AL=0
*/
additive_latency = 0;
#endif
/*
* Validate additive latency
* FIXME: move to somewhere else to validate
*
* AL <= tRCD(min)
*/
if (mclk_to_picos(additive_latency) > tRCD_ps) {
printf("Error: invalid additive latency exceeds tRCD(min).\n");
return 1;
}
/*
* RL = CL + AL; RL >= 3 for ODT_RD_CFG to be enabled
* WL = RL - 1; WL >= 3 for ODT_WL_CFG to be enabled
* ADD_LAT (the register) must be set to a value less
* than ACTTORW if WL = 1, then AL must be set to 1
* RD_TO_PRE (the register) must be set to a minimum
* tRTP + AL if AL is nonzero
*/
/*
* Additive latency will be applied only if the memctl option to
* use it is set.
*/
outpdimm->additive_latency = additive_latency;
return 0;
}


@@ -0,0 +1,479 @@
/*
* Copyright 2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* Version 2 as published by the Free Software Foundation.
*/
/*
* Generic driver for Freescale DDR/DDR2/DDR3 memory controller.
* Based on code from spd_sdram.c
* Author: James Yang [at freescale.com]
*/
#include <common.h>
#include <asm/fsl_ddr_sdram.h>
#include "ddr.h"
extern void fsl_ddr_set_lawbar(
const common_timing_params_t *memctl_common_params,
unsigned int memctl_interleaved,
unsigned int ctrl_num);
/* processor specific function */
extern void fsl_ddr_set_memctl_regs(const fsl_ddr_cfg_regs_t *regs,
unsigned int ctrl_num);
/* Board-specific functions defined in each board's ddr.c */
extern void fsl_ddr_get_spd(generic_spd_eeprom_t *ctrl_dimms_spd,
unsigned int ctrl_num);
/*
* ASSUMPTIONS:
* - Same number of CONFIG_DIMM_SLOTS_PER_CTLR on each controller
* - Same memory data bus width on all controllers
*
* NOTES:
*
* The memory controller and associated documentation use confusing
* terminology when referring to the organization of DRAM.
*
* Here is a terminology translation table:
*
* memory controller/documentation |industry |this code |signals
* -------------------------------|-----------|-----------|-----------------
* physical bank/bank |rank |rank |chip select (CS)
* logical bank/sub-bank |bank |bank |bank address (BA)
* page/row |row |page |row address
* ??? |column |column |column address
*
* The naming confusion is further exacerbated by the descriptions of the
* memory controller interleaving feature, where accesses are interleaved
* _BETWEEN_ two separate memory controllers. This is configured only in
* CS0_CONFIG[INTLV_CTL] of each memory controller.
*
* memory controller documentation | number of chip selects
* | per memory controller supported
* --------------------------------|-----------------------------------------
* cache line interleaving | 1 (CS0 only)
* page interleaving | 1 (CS0 only)
* bank interleaving | 1 (CS0 only)
* superbank interleaving | depends on bank (chip select)
* | interleaving [rank interleaving]
* | mode used on every memory controller
*
* Even further confusing is the existence of the interleaving feature
* _WITHIN_ each memory controller. The feature is referred to in
* documentation as chip select interleaving or bank interleaving,
* although it is configured in the DDR_SDRAM_CFG field.
*
* Name of field | documentation name | this code
* -----------------------------|-----------------------|------------------
* DDR_SDRAM_CFG[BA_INTLV_CTL] | Bank (chip select) | rank interleaving
* | interleaving
*/
#ifdef DEBUG
const char *step_string_tbl[] = {
"STEP_GET_SPD",
"STEP_COMPUTE_DIMM_PARMS",
"STEP_COMPUTE_COMMON_PARMS",
"STEP_GATHER_OPTS",
"STEP_ASSIGN_ADDRESSES",
"STEP_COMPUTE_REGS",
"STEP_PROGRAM_REGS",
"STEP_ALL"
};
const char * step_to_string(unsigned int step) {
unsigned int s = __ilog2(step);
if ((1 << s) != step)
return step_string_tbl[7];
return step_string_tbl[s];
}
#endif
int step_assign_addresses(fsl_ddr_info_t *pinfo,
unsigned int dbw_cap_adj[],
unsigned int *memctl_interleaving,
unsigned int *rank_interleaving)
{
int i, j;
/*
* If a reduced data width is requested, but the SPD
* specifies a physically wider device, adjust the
* computed dimm capacities accordingly before
* assigning addresses.
*/
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
unsigned int found = 0;
switch (pinfo->memctl_opts[i].data_bus_width) {
case 2:
/* 16-bit */
printf("can't handle 16-bit mode yet\n");
break;
case 1:
/* 32-bit */
for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
unsigned int dw;
dw = pinfo->dimm_params[i][j].data_width;
if (pinfo->dimm_params[i][j].n_ranks
&& (dw == 72 || dw == 64)) {
/*
* FIXME: can't really do it
* like this because this just
* further reduces the memory
*/
found = 1;
break;
}
}
if (found) {
dbw_cap_adj[i] = 1;
}
break;
case 0:
/* 64-bit */
break;
default:
printf("unexpected data bus width "
"specified controller %u\n", i);
return 1;
}
}
/*
* Check if all controllers are configured for memory
* controller interleaving.
*/
j = 0;
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
if (pinfo->memctl_opts[i].memctl_interleaving) {
j++;
}
}
if (j == 2)
*memctl_interleaving = 1;
/* Check that all controllers are rank interleaving. */
j = 0;
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
if (pinfo->memctl_opts[i].ba_intlv_ctl) {
j++;
}
}
if (j == 2)
*rank_interleaving = 1;
if (*memctl_interleaving) {
unsigned long long addr, total_mem_per_ctlr = 0;
/*
* If interleaving between memory controllers,
* make each controller start at a base address
* of 0.
*
* Also, if bank interleaving (chip select
* interleaving) is enabled on each memory
* controller, CS0 needs to be programmed to
* cover the entire memory range on that memory
* controller
*
* Bank interleaving also implies that each
* addressed chip select is identical in size.
*/
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
addr = 0;
pinfo->common_timing_params[i].base_address = 0ull;
for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
unsigned long long cap
= pinfo->dimm_params[i][j].capacity;
pinfo->dimm_params[i][j].base_address = addr;
addr += cap >> dbw_cap_adj[i];
total_mem_per_ctlr += cap >> dbw_cap_adj[i];
}
}
pinfo->common_timing_params[0].total_mem = total_mem_per_ctlr;
} else {
/*
* Simple linear assignment if memory
* controllers are not interleaved.
*/
unsigned long long cur_memsize = 0;
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
u64 total_mem_per_ctlr = 0;
pinfo->common_timing_params[i].base_address =
cur_memsize;
for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
/* Compute DIMM base addresses. */
unsigned long long cap =
pinfo->dimm_params[i][j].capacity;
pinfo->dimm_params[i][j].base_address =
cur_memsize;
cur_memsize += cap >> dbw_cap_adj[i];
total_mem_per_ctlr += cap >> dbw_cap_adj[i];
}
pinfo->common_timing_params[i].total_mem =
total_mem_per_ctlr;
}
}
return 0;
}
unsigned long long
fsl_ddr_compute(fsl_ddr_info_t *pinfo, unsigned int start_step)
{
unsigned int i, j;
unsigned int all_controllers_memctl_interleaving = 0;
unsigned int all_controllers_rank_interleaving = 0;
unsigned long long total_mem = 0;
fsl_ddr_cfg_regs_t *ddr_reg = pinfo->fsl_ddr_config_reg;
common_timing_params_t *timing_params = pinfo->common_timing_params;
/* data bus width capacity adjust shift amount */
unsigned int dbw_capacity_adjust[CONFIG_NUM_DDR_CONTROLLERS];
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
dbw_capacity_adjust[i] = 0;
}
debug("starting at step %u (%s)\n",
start_step, step_to_string(start_step));
switch (start_step) {
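/*
* Note that the cases below intentionally fall through: starting
* at step N executes step N and every later step, which is how
* fsl_ddr_sdram() restarts from STEP_ASSIGN_ADDRESSES after
* disabling memory controller interleaving.
*/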
case STEP_GET_SPD:
/* STEP 1: Gather all DIMM SPD data */
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
fsl_ddr_get_spd(pinfo->spd_installed_dimms[i], i);
}
case STEP_COMPUTE_DIMM_PARMS:
/* STEP 2: Compute DIMM parameters from SPD data */
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
unsigned int retval;
generic_spd_eeprom_t *spd =
&(pinfo->spd_installed_dimms[i][j]);
dimm_params_t *pdimm =
&(pinfo->dimm_params[i][j]);
retval = compute_dimm_parameters(spd, pdimm, i);
if (retval == 2) {
printf("Error: compute_dimm_parameters"
" non-zero returned FATAL value "
"for memctl=%u dimm=%u\n", i, j);
return 0;
}
if (retval) {
debug("Warning: compute_dimm_parameters"
" non-zero return value for memctl=%u "
"dimm=%u\n", i, j);
}
}
}
case STEP_COMPUTE_COMMON_PARMS:
/*
* STEP 3: Compute a common set of timing parameters
* suitable for all of the DIMMs on each memory controller
*/
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
debug("Computing lowest common DIMM"
" parameters for memctl=%u\n", i);
compute_lowest_common_dimm_parameters(
pinfo->dimm_params[i],
&timing_params[i],
CONFIG_DIMM_SLOTS_PER_CTLR);
}
case STEP_GATHER_OPTS:
/* STEP 4: Gather configuration requirements from user */
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
debug("Reloading memory controller "
"configuration options for memctl=%u\n", i);
/*
* This "reloads" the memory controller options
* to defaults. If the user "edits" an option,
* next_step points to the step after this,
* which is currently STEP_ASSIGN_ADDRESSES.
*/
populate_memctl_options(
timing_params[i].all_DIMMs_registered,
&pinfo->memctl_opts[i],
pinfo->dimm_params[i], i);
}
case STEP_ASSIGN_ADDRESSES:
/* STEP 5: Assign addresses to chip selects */
step_assign_addresses(pinfo,
dbw_capacity_adjust,
&all_controllers_memctl_interleaving,
&all_controllers_rank_interleaving);
case STEP_COMPUTE_REGS:
/* STEP 6: compute controller register values */
debug("FSL Memory ctrl cg register computation\n");
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
if (timing_params[i].ndimms_present == 0) {
memset(&ddr_reg[i], 0,
sizeof(fsl_ddr_cfg_regs_t));
continue;
}
compute_fsl_memctl_config_regs(
&pinfo->memctl_opts[i],
&ddr_reg[i], &timing_params[i],
pinfo->dimm_params[i],
dbw_capacity_adjust[i]);
}
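/* Fall through. */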
default:
break;
}
/* Compute the total amount of memory. */
/*
* If rank (bank) interleaving is enabled but memory controller
* interleaving is NOT, the CS_BNDS registers describe the amount
* of memory on each controller, so the total is the sum across
* controllers.
*/
if (!all_controllers_memctl_interleaving
&& all_controllers_rank_interleaving) {
total_mem = 0;
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
total_mem += timing_params[i].total_mem;
}
} else {
/*
* Compute the amount of memory available just by
* looking for the highest valid CSn_BNDS value.
* This allows us to also experiment with using
* only CS0 when using dual-rank DIMMs.
*/
unsigned int max_end = 0;
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
for (j = 0; j < CONFIG_CHIP_SELECTS_PER_CTRL; j++) {
fsl_ddr_cfg_regs_t *reg = &ddr_reg[i];
if (reg->cs[j].config & 0x80000000) {
unsigned int end;
end = reg->cs[j].bnds & 0xFFF;
if (end > max_end) {
max_end = end;
}
}
}
}
total_mem = 1 + (((unsigned long long)max_end << 24ULL)
| 0xFFFFFFULL);
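/*
 * Illustrative example (not from the original source): if the
 * highest CSn_BNDS end field is 0x01F, then
 * total_mem = 1 + ((0x1F << 24) | 0xFFFFFF) = 0x20000000 = 512 MiB.
 */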
}
return total_mem;
}
/*
* fsl_ddr_sdram() -- this is the main function to be called by
* initdram() in the board file.
*
* It returns amount of memory configured in bytes.
*/
phys_size_t fsl_ddr_sdram(void)
{
unsigned int i;
unsigned int memctl_interleaved;
unsigned long long total_memory;
fsl_ddr_info_t info;
/* Reset info structure. */
memset(&info, 0, sizeof(fsl_ddr_info_t));
/* Compute it once normally. */
total_memory = fsl_ddr_compute(&info, STEP_GET_SPD);
/* Check for memory controller interleaving. */
memctl_interleaved = 0;
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
memctl_interleaved +=
info.memctl_opts[i].memctl_interleaving;
}
if (memctl_interleaved) {
if (memctl_interleaved == CONFIG_NUM_DDR_CONTROLLERS) {
debug("memctl interleaving\n");
/*
* Change the meaning of memctl_interleaved
* to be "boolean".
*/
memctl_interleaved = 1;
} else {
printf("Warning: memctl interleaving not "
"properly configured on all controllers\n");
memctl_interleaved = 0;
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++)
info.memctl_opts[i].memctl_interleaving = 0;
debug("Recomputing with memctl_interleaving off.\n");
total_memory = fsl_ddr_compute(&info,
STEP_ASSIGN_ADDRESSES);
}
}
/* Program configuration registers. */
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
debug("Programming controller %u\n", i);
if (info.common_timing_params[i].ndimms_present == 0) {
debug("No dimms present on controller %u; "
"skipping programming\n", i);
continue;
}
fsl_ddr_set_memctl_regs(&(info.fsl_ddr_config_reg[i]), i);
}
if (memctl_interleaved) {
const unsigned int ctrl_num = 0;
/* Only set LAWBAR1 if memory controller interleaving is on. */
fsl_ddr_set_lawbar(&info.common_timing_params[0],
memctl_interleaved, ctrl_num);
} else {
/*
* Memory controller interleaving is NOT on;
* set each lawbar individually.
*/
for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
fsl_ddr_set_lawbar(&info.common_timing_params[i],
0, i);
}
}
debug("total_memory = %llu\n", total_memory);
#if !defined(CONFIG_PHYS_64BIT)
/* Check for 4G or more. Bad. */
if (total_memory >= (1ull << 32)) {
printf("Detected %lld MB of memory\n", total_memory >> 20);
printf("This U-Boot only supports < 4G of DDR\n");
printf("You could rebuild it with CONFIG_PHYS_64BIT\n");
total_memory = CONFIG_MAX_MEM_MAPPED;
}
#endif
return total_memory;
}
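/*
 * Usage sketch (illustrative, not part of this file): a board file's
 * initdram() typically just returns this value, e.g.
 *
 * phys_size_t initdram(int board_type)
 * {
 *	return fsl_ddr_sdram();
 * }
 */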

View File

@ -0,0 +1,297 @@
/*
* Copyright 2008, 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <common.h>
#include <asm/fsl_ddr_sdram.h>
#include "ddr.h"
/* Board-specific functions defined in each board's ddr.c */
extern void fsl_ddr_board_options(memctl_options_t *popts,
dimm_params_t *pdimm,
unsigned int ctrl_num);
unsigned int populate_memctl_options(int all_DIMMs_registered,
memctl_options_t *popts,
dimm_params_t *pdimm,
unsigned int ctrl_num)
{
unsigned int i;
const char *p;
/* Chip select options. */
/* Pick chip-select local options. */
for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
/* If not DDR2, odt_rd_cfg and odt_wr_cfg need to be 0. */
/* only for single CS? */
popts->cs_local_opts[i].odt_rd_cfg = 0;
popts->cs_local_opts[i].odt_wr_cfg = 1;
popts->cs_local_opts[i].auto_precharge = 0;
}
/* Pick interleaving mode. */
/*
* 0 = no interleaving
* 1 = interleaving between 2 controllers
*/
popts->memctl_interleaving = 0;
/*
* 0 = cacheline
* 1 = page
* 2 = (logical) bank
* 3 = superbank (only if CS interleaving is enabled)
*/
popts->memctl_interleaving_mode = 0;
/*
* 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
* 1: page: bit to the left of the column bits selects the memctl
* 2: bank: bit to the left of the bank bits selects the memctl
* 3: superbank: bit to the left of the chip select selects the memctl
*
* NOTE: ba_intlv (rank interleaving) is independent of memory
* controller interleaving; it is only within a memory controller.
* Must use superbank interleaving if rank interleaving is used and
* memory controller interleaving is enabled.
*/
/*
* 0 = no
* 0x40 = CS0,CS1
* 0x20 = CS2,CS3
* 0x60 = CS0,CS1 + CS2,CS3
* 0x04 = CS0,CS1,CS2,CS3
*/
popts->ba_intlv_ctl = 0;
/* Memory Organization Parameters */
popts->registered_dimm_en = all_DIMMs_registered;
/* Operational Mode Parameters */
/* Pick ECC modes */
#ifdef CONFIG_DDR_ECC
popts->ECC_mode = 1; /* 0 = disabled, 1 = enabled */
#else
popts->ECC_mode = 0; /* 0 = disabled, 1 = enabled */
#endif
popts->ECC_init_using_memctl = 1; /* 0 = use DMA, 1 = use memctl */
/*
* Choose DQS config
* 0 for DDR1
* 1 for DDR2
*/
#if defined(CONFIG_FSL_DDR1)
popts->DQS_config = 0;
#elif defined(CONFIG_FSL_DDR2) || defined(CONFIG_FSL_DDR3)
popts->DQS_config = 1;
#endif
/* Choose self-refresh during sleep. */
popts->self_refresh_in_sleep = 1;
/* Choose dynamic power management mode. */
popts->dynamic_power = 0;
/* 0 = 64-bit, 1 = 32-bit, 2 = 16-bit */
popts->data_bus_width = 0;
/* Choose burst length. */
#if defined(CONFIG_FSL_DDR3)
#if defined(CONFIG_E500MC)
popts->OTF_burst_chop_en = 0; /* on-the-fly burst chop disable */
popts->burst_length = DDR_BL8; /* Fixed 8-beat burst len */
#else
popts->OTF_burst_chop_en = 1; /* on-the-fly burst chop */
popts->burst_length = DDR_OTF; /* on-the-fly BC4 and BL8 */
#endif
#else
popts->burst_length = DDR_BL4; /* has to be 4 for DDR2 */
#endif
/* Choose ddr controller address mirror mode */
#if defined(CONFIG_FSL_DDR3)
popts->mirrored_dimm = pdimm[0].mirrored_dimm;
#endif
/* Global Timing Parameters. */
debug("mclk_ps = %u ps\n", get_memory_clk_period_ps());
/* Pick a caslat override. */
popts->cas_latency_override = 0;
popts->cas_latency_override_value = 3;
if (popts->cas_latency_override) {
debug("using caslat override value = %u\n",
popts->cas_latency_override_value);
}
/* Decide whether to use the computed derated latency */
popts->use_derated_caslat = 0;
/* Choose an additive latency. */
popts->additive_latency_override = 0;
popts->additive_latency_override_value = 3;
if (popts->additive_latency_override) {
debug("using additive latency override value = %u\n",
popts->additive_latency_override_value);
}
/*
* 2T_EN setting
*
* Factors to consider for 2T_EN:
* - number of DIMMs installed
* - number of components, number of active ranks
* - how much time you want to spend playing around
*/
popts->twoT_en = 0;
popts->threeT_en = 0;
/*
* BSTTOPRE precharge interval
*
* Set this to 0 for global auto precharge
*
* FIXME: Should this be configured in picoseconds?
* The argument for ps: it relates more directly to actual DRAM
* timing parameters such as tRAS, e.g. tRAS(min) = 40 ns.
*/
popts->bstopre = 0x100;
/* Minimum CKE pulse width -- tCKE(MIN) */
popts->tCKE_clock_pulse_width_ps
= mclk_to_picos(FSL_DDR_MIN_TCKE_PULSE_WIDTH_DDR);
/*
* Window for four activates -- tFAW
*
* FIXME: UM: applies only to DDR2/DDR3 with eight logical banks
* FIXME: varies depending upon number of column addresses or data
* FIXME: width; was considering looking at pdimm->primary_sdram_width
*/
#if defined(CONFIG_FSL_DDR1)
popts->tFAW_window_four_activates_ps = mclk_to_picos(1);
#elif defined(CONFIG_FSL_DDR2)
/*
* x4/x8; some datasheets have 35000
* x16 wide columns only? Use 50000?
*/
popts->tFAW_window_four_activates_ps = 37500;
#elif defined(CONFIG_FSL_DDR3)
popts->tFAW_window_four_activates_ps = pdimm[0].tFAW_ps;
#endif
popts->zq_en = 0;
popts->wrlvl_en = 0;
#if defined(CONFIG_FSL_DDR3)
/*
* DDR3 DIMMs use a fly-by topology, so enable write
* leveling by default to meet tQDSS under different loading.
*/
popts->wrlvl_en = 1;
popts->wrlvl_override = 0;
#endif
/*
* Check interleaving configuration from environment.
* Please refer to doc/README.fsl-ddr for the detail.
*
* If memory controller interleaving is enabled, then the data
* bus widths must be programmed identically for the 2 memory
* controllers.
*
* XXX: Attempt to set both controllers to the same chip-select
* interleaving mode. It makes a best effort to get the requested
* ranks interleaved together, so the result should be a subset
* of the requested configuration.
*/
#if (CONFIG_NUM_DDR_CONTROLLERS > 1)
if ((p = getenv("memctl_intlv_ctl")) != NULL) {
if (pdimm[0].n_ranks == 0) {
printf("There is no rank on CS0. Because only rank on "
"CS0 and ranks chip-select interleaved with CS0"
" are controller interleaved, force non memory "
"controller interleaving\n");
popts->memctl_interleaving = 0;
} else {
popts->memctl_interleaving = 1;
if (strcmp(p, "cacheline") == 0)
popts->memctl_interleaving_mode =
FSL_DDR_CACHE_LINE_INTERLEAVING;
else if (strcmp(p, "page") == 0)
popts->memctl_interleaving_mode =
FSL_DDR_PAGE_INTERLEAVING;
else if (strcmp(p, "bank") == 0)
popts->memctl_interleaving_mode =
FSL_DDR_BANK_INTERLEAVING;
else if (strcmp(p, "superbank") == 0)
popts->memctl_interleaving_mode =
FSL_DDR_SUPERBANK_INTERLEAVING;
else
popts->memctl_interleaving_mode =
simple_strtoul(p, NULL, 0);
}
}
#endif
if (((p = getenv("ba_intlv_ctl")) != NULL) &&
(CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
if (strcmp(p, "cs0_cs1") == 0)
popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
else if (strcmp(p, "cs2_cs3") == 0)
popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
else if (strcmp(p, "cs0_cs1_and_cs2_cs3") == 0)
popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
else if (strcmp(p, "cs0_cs1_cs2_cs3") == 0)
popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
else
popts->ba_intlv_ctl = simple_strtoul(p, NULL, 0);
switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
case FSL_DDR_CS0_CS1_CS2_CS3:
case FSL_DDR_CS0_CS1:
if (pdimm[0].n_ranks != 2) {
popts->ba_intlv_ctl = 0;
printf("Not enough bank(chip-select) for "
"CS0+CS1, force non-interleaving!\n");
}
break;
case FSL_DDR_CS2_CS3:
if (pdimm[1].n_ranks != 2) {
popts->ba_intlv_ctl = 0;
printf("Not enough ranks (chip selects) for "
"CS2+CS3; forcing non-interleaving!\n");
}
break;
case FSL_DDR_CS0_CS1_AND_CS2_CS3:
if ((pdimm[0].n_ranks != 2) || (pdimm[1].n_ranks != 2)) {
popts->ba_intlv_ctl = 0;
printf("Not enough ranks (chip selects) for CS0+CS1 "
"or CS2+CS3; forcing non-interleaving!\n");
}
break;
default:
popts->ba_intlv_ctl = 0;
break;
}
}
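/*
 * Illustrative usage (hypothetical session, option names and values
 * taken from the parsing above): "setenv memctl_intlv_ctl cacheline"
 * or "setenv ba_intlv_ctl cs0_cs1", saved to the environment before
 * the next boot, select the corresponding interleaving mode.
 */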
fsl_ddr_board_options(popts, pdimm, ctrl_num);
return 0;
}

View File

@ -0,0 +1,206 @@
/*
* Copyright 2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* Version 2 as published by the Free Software Foundation.
*/
#include <common.h>
#include <asm/fsl_law.h>
#include "ddr.h"
unsigned int fsl_ddr_get_mem_data_rate(void);
/*
* Round mclk_ps to the nearest 10 ps in memory controller code.
*
* If rounding error propagation makes an imprecise data rate come
* out slightly too high, the rounded mclk_ps still yields a working
* memory controller configuration.
*/
unsigned int get_memory_clk_period_ps(void)
{
unsigned int mclk_ps;
mclk_ps = 2000000000000ULL / fsl_ddr_get_mem_data_rate();
/* round to nearest 10 ps */
return 10 * ((mclk_ps + 5) / 10);
}
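/*
 * Illustrative example (not from the original source): a DDR3-1066
 * data rate of 1066666666 gives 2000000000000 / 1066666666 = 1875 ps,
 * which the rounding above turns into 1880 ps; an 800 MT/s rate gives
 * exactly 2500 ps.
 */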
/* Convert picoseconds into DRAM clock cycles (rounding up if needed). */
unsigned int picos_to_mclk(unsigned int picos)
{
const unsigned long long ULL_2e12 = 2000000000000ULL;
const unsigned long long ULL_8Fs = 0xFFFFFFFFULL;
unsigned long long clks;
unsigned long long clks_temp;
if (!picos)
return 0;
clks = fsl_ddr_get_mem_data_rate() * (unsigned long long) picos;
clks_temp = clks;
clks = clks / ULL_2e12;
if (clks_temp % ULL_2e12) {
clks++;
}
if (clks > ULL_8Fs) {
clks = ULL_8Fs;
}
return (unsigned int) clks;
}
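/*
 * Illustrative example (not from the original source): at an 800 MT/s
 * data rate, picos_to_mclk(15000) = 800000000 * 15000 / 2e12 = 6 clocks
 * exactly, while picos_to_mclk(13750) works out to 5.5 and is rounded
 * up to 6.
 */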
unsigned int mclk_to_picos(unsigned int mclk)
{
return get_memory_clk_period_ps() * mclk;
}
void
__fsl_ddr_set_lawbar(const common_timing_params_t *memctl_common_params,
unsigned int memctl_interleaved,
unsigned int ctrl_num)
{
unsigned long long base = memctl_common_params->base_address;
unsigned long long size = memctl_common_params->total_mem;
/*
* If no DIMMs on this controller, do not proceed any further.
*/
if (!memctl_common_params->ndimms_present) {
return;
}
#if !defined(CONFIG_PHYS_64BIT)
if (base >= CONFIG_MAX_MEM_MAPPED)
return;
if ((base + size) >= CONFIG_MAX_MEM_MAPPED)
size = CONFIG_MAX_MEM_MAPPED - base;
#endif
if (ctrl_num == 0) {
/*
* Set up LAW for DDR controller 1 space.
*/
unsigned int lawbar1_target_id = memctl_interleaved
? LAW_TRGT_IF_DDR_INTRLV : LAW_TRGT_IF_DDR_1;
if (set_ddr_laws(base, size, lawbar1_target_id) < 0) {
printf("%s: ERROR (ctrl #0, intrlv=%d)\n", __func__,
memctl_interleaved);
return;
}
} else if (ctrl_num == 1) {
if (set_ddr_laws(base, size, LAW_TRGT_IF_DDR_2) < 0) {
printf("%s: ERROR (ctrl #1)\n", __func__);
return;
}
} else {
printf("%s: unexpected DDR controller number (%u)\n", __func__,
ctrl_num);
}
}
__attribute__((weak, alias("__fsl_ddr_set_lawbar"))) void
fsl_ddr_set_lawbar(const common_timing_params_t *memctl_common_params,
unsigned int memctl_interleaved,
unsigned int ctrl_num);
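/*
 * fsl_ddr_set_lawbar() is a weak alias for the default above, so a
 * board can provide its own implementation to override the LAW setup.
 */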
void board_add_ram_info(int use_default)
{
#if defined(CONFIG_MPC85xx)
volatile ccsr_ddr_t *ddr = (void *)(CONFIG_SYS_MPC85xx_DDR_ADDR);
#elif defined(CONFIG_MPC86xx)
volatile ccsr_ddr_t *ddr = (void *)(CONFIG_SYS_MPC86xx_DDR_ADDR);
#endif
#if (CONFIG_NUM_DDR_CONTROLLERS > 1)
uint32_t cs0_config = in_be32(&ddr->cs0_config);
#endif
uint32_t sdram_cfg = in_be32(&ddr->sdram_cfg);
int cas_lat;
puts(" (DDR");
switch ((sdram_cfg & SDRAM_CFG_SDRAM_TYPE_MASK) >>
SDRAM_CFG_SDRAM_TYPE_SHIFT) {
case SDRAM_TYPE_DDR1:
puts("1");
break;
case SDRAM_TYPE_DDR2:
puts("2");
break;
case SDRAM_TYPE_DDR3:
puts("3");
break;
default:
puts("?");
break;
}
if (sdram_cfg & SDRAM_CFG_32_BE)
puts(", 32-bit");
else
puts(", 64-bit");
/* Calculate CAS latency based on timing cfg values */
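/*
 * cas_lat is accumulated in half-clock units: the TIMING_CFG_1 field
 * plus one, with bit 12 of TIMING_CFG_3 adding another 8 full clocks
 * (16 half clocks). CL is printed as cas_lat / 2, with ".5" appended
 * for odd values.
 */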
cas_lat = ((in_be32(&ddr->timing_cfg_1) >> 16) & 0xf) + 1;
if ((in_be32(&ddr->timing_cfg_3) >> 12) & 1)
cas_lat += (8 << 1);
printf(", CL=%d", cas_lat >> 1);
if (cas_lat & 0x1)
puts(".5");
if (sdram_cfg & SDRAM_CFG_ECC_EN)
puts(", ECC on)");
else
puts(", ECC off)");
#if (CONFIG_NUM_DDR_CONTROLLERS > 1)
if (cs0_config & 0x20000000) {
puts("\n");
puts(" DDR Controller Interleaving Mode: ");
switch ((cs0_config >> 24) & 0xf) {
case FSL_DDR_CACHE_LINE_INTERLEAVING:
puts("cache line");
break;
case FSL_DDR_PAGE_INTERLEAVING:
puts("page");
break;
case FSL_DDR_BANK_INTERLEAVING:
puts("bank");
break;
case FSL_DDR_SUPERBANK_INTERLEAVING:
puts("super-bank");
break;
default:
puts("invalid");
break;
}
}
#endif
if ((sdram_cfg >> 8) & 0x7f) {
puts("\n");
puts(" DDR Chip-Select Interleaving Mode: ");
switch ((sdram_cfg >> 8) & 0x7f) {
case FSL_DDR_CS0_CS1_CS2_CS3:
puts("CS0+CS1+CS2+CS3");
break;
case FSL_DDR_CS0_CS1:
puts("CS0+CS1");
break;
case FSL_DDR_CS2_CS3:
puts("CS2+CS3");
break;
case FSL_DDR_CS0_CS1_AND_CS2_CS3:
puts("CS0+CS1 and CS2+CS3");
break;
default:
puts("invalid");
break;
}
}
}
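/*
 * Example of the resulting banner (illustrative): " (DDR3, 64-bit,
 * CL=7, ECC on)", optionally followed by the interleaving mode lines
 * printed above.
 */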

View File

@ -0,0 +1,55 @@
/*
* Copyright 2009 Freescale Semiconductor, Inc.
*
* This file is derived from arch/powerpc/cpu/mpc85xx/cpu.c and
* arch/powerpc/cpu/mpc86xx/cpu.c. Basically this file contains
* cpu specific common code for 85xx/86xx processors.
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include <libfdt.h>
#include <fdt_support.h>
void ft_fixup_num_cores(void *blob)
{
int off, num_cores, del_cores;
del_cores = 0;
num_cores = cpu_numcores();
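/* The property length 4 covers "cpu" plus its terminating NUL. */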
off = fdt_node_offset_by_prop_value(blob, -1, "device_type", "cpu", 4);
while (off != -FDT_ERR_NOTFOUND) {
u32 *reg = (u32 *)fdt_getprop(blob, off, "reg", 0);
/*
* If we find a cpu node beyond the cores we expect, delete it
* and reset the offset back to the start, since we can no
* longer trust the offsets.
*/
if (*reg > num_cores - 1) {
fdt_del_node(blob, off);
del_cores++;
off = -1;
}
off = fdt_node_offset_by_prop_value(blob, off,
"device_type", "cpu", 4);
}
debug ("%x core system found\n", num_cores);
debug ("deleted %d extra core entry entries from device tree\n",
del_cores);
}

View File

@ -0,0 +1,214 @@
/*
* Copyright 2009-2010 Freescale Semiconductor, Inc.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include <asm/fsl_law.h>
#include <pci.h>
struct pci_info {
u32 cfg;
};
/* The cfg field is a bit mask in which each bit corresponds to one value
* of the cfg_IO_ports[] signal; the bit is set if the interface is
* enabled for that cfg_IO_ports[] value.
*
* On MPC86xx/PQ3 based systems:
* we extract cfg_IO_ports from the GUTS register PORDEVSR
*
* cfg_IO_ports only exists on systems with PCIe (we set cfg to 0 for
* systems without PCIe)
*/
#if defined(CONFIG_MPC8540) || defined(CONFIG_MPC8560)
static struct pci_info pci_config_info[] =
{
[LAW_TRGT_IF_PCI] = {
.cfg = 0,
},
};
#elif defined(CONFIG_MPC8541) || defined(CONFIG_MPC8555)
static struct pci_info pci_config_info[] =
{
[LAW_TRGT_IF_PCI] = {
.cfg = 0,
},
};
#elif defined(CONFIG_MPC8536)
static struct pci_info pci_config_info[] =
{
[LAW_TRGT_IF_PCI] = {
.cfg = 0,
},
[LAW_TRGT_IF_PCIE_1] = {
.cfg = (1 << 2) | (1 << 3) | (1 << 5) | (1 << 7),
},
[LAW_TRGT_IF_PCIE_2] = {
.cfg = (1 << 5) | (1 << 7),
},
[LAW_TRGT_IF_PCIE_3] = {
.cfg = (1 << 7),
},
};
#elif defined(CONFIG_MPC8544)
static struct pci_info pci_config_info[] =
{
[LAW_TRGT_IF_PCI] = {
.cfg = 0,
},
[LAW_TRGT_IF_PCIE_1] = {
.cfg = (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5) |
(1 << 6) | (1 << 7),
},
[LAW_TRGT_IF_PCIE_2] = {
.cfg = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7),
},
[LAW_TRGT_IF_PCIE_3] = {
.cfg = (1 << 6) | (1 << 7),
},
};
#elif defined(CONFIG_MPC8548)
static struct pci_info pci_config_info[] =
{
[LAW_TRGT_IF_PCI_1] = {
.cfg = 0,
},
[LAW_TRGT_IF_PCI_2] = {
.cfg = 0,
},
/* PCI_2 is always host and we don't use iosel to determine enable/disable */
[LAW_TRGT_IF_PCIE_1] = {
.cfg = (1 << 3) | (1 << 4) | (1 << 7),
},
};
#elif defined(CONFIG_MPC8568)
static struct pci_info pci_config_info[] =
{
[LAW_TRGT_IF_PCI] = {
.cfg = 0,
},
[LAW_TRGT_IF_PCIE_1] = {
.cfg = (1 << 3) | (1 << 4) | (1 << 7),
},
};
#elif defined(CONFIG_MPC8569)
static struct pci_info pci_config_info[] =
{
[LAW_TRGT_IF_PCIE_1] = {
.cfg = (1 << 0) | (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) |
(1 << 8) | (1 << 0xc) | (1 << 0xf),
},
};
#elif defined(CONFIG_MPC8572)
static struct pci_info pci_config_info[] =
{
[LAW_TRGT_IF_PCIE_1] = {
.cfg = (1 << 2) | (1 << 3) | (1 << 7) |
(1 << 0xb) | (1 << 0xc) | (1 << 0xf),
},
[LAW_TRGT_IF_PCIE_2] = {
.cfg = (1 << 3) | (1 << 7),
},
[LAW_TRGT_IF_PCIE_3] = {
.cfg = (1 << 7),
},
};
#elif defined(CONFIG_MPC8610)
static struct pci_info pci_config_info[] =
{
[LAW_TRGT_IF_PCI_1] = {
.cfg = 0,
},
[LAW_TRGT_IF_PCIE_1] = {
.cfg = (1 << 1) | (1 << 4),
},
[LAW_TRGT_IF_PCIE_2] = {
.cfg = (1 << 0) | (1 << 4),
},
};
#elif defined(CONFIG_MPC8641)
static struct pci_info pci_config_info[] =
{
[LAW_TRGT_IF_PCIE_1] = {
.cfg = (1 << 2) | (1 << 3) | (1 << 5) | (1 << 6) |
(1 << 7) | (1 << 0xe) | (1 << 0xf),
},
};
#elif defined(CONFIG_P1011) || defined(CONFIG_P1020) || \
defined(CONFIG_P1012) || defined(CONFIG_P1021)
static struct pci_info pci_config_info[] =
{
[LAW_TRGT_IF_PCIE_1] = {
.cfg = (1 << 0) | (1 << 6) | (1 << 0xe) | (1 << 0xf),
},
[LAW_TRGT_IF_PCIE_2] = {
.cfg = (1 << 0xe),
},
};
#elif defined(CONFIG_P1013) || defined(CONFIG_P1022)
static struct pci_info pci_config_info[] =
{
[LAW_TRGT_IF_PCIE_1] = {
.cfg = (1 << 6) | (1 << 7) | (1 << 9) | (1 << 0xa) |
(1 << 0xb) | (1 << 0xd) | (1 << 0xe) |
(1 << 0xf) | (1 << 0x15) | (1 << 0x16) |
(1 << 0x17) | (1 << 0x18) | (1 << 0x19) |
(1 << 0x1a) | (1 << 0x1b) | (1 << 0x1c) |
(1 << 0x1d) | (1 << 0x1e) | (1 << 0x1f),
},
[LAW_TRGT_IF_PCIE_2] = {
.cfg = (1 << 0) | (1 << 1) | (1 << 6) | (1 << 7) |
(1 << 9) | (1 << 0xa) | (1 << 0xb) | (1 << 0xd) |
(1 << 0x15) | (1 << 0x16) | (1 << 0x17) |
(1 << 0x18) | (1 << 0x1c),
},
[LAW_TRGT_IF_PCIE_3] = {
.cfg = (1 << 6) | (1 << 7) | (1 << 9) | (1 << 0xd) |
(1 << 0x15) | (1 << 0x16) | (1 << 0x17) | (1 << 0x18) |
(1 << 0x19) | (1 << 0x1a) | (1 << 0x1b),
},
};
#elif defined(CONFIG_P2010) || defined(CONFIG_P2020)
static struct pci_info pci_config_info[] =
{
[LAW_TRGT_IF_PCIE_1] = {
.cfg = (1 << 0) | (1 << 2) | (1 << 4) | (1 << 6) |
(1 << 0xd) | (1 << 0xe) | (1 << 0xf),
},
[LAW_TRGT_IF_PCIE_2] = {
.cfg = (1 << 2) | (1 << 0xe),
},
[LAW_TRGT_IF_PCIE_3] = {
.cfg = (1 << 2) | (1 << 4),
},
};
#elif defined(CONFIG_FSL_CORENET)
#else
#error Need to define pci_config_info for processor
#endif
#ifndef CONFIG_FSL_CORENET
int is_fsl_pci_cfg(enum law_trgt_if trgt, u32 io_sel)
{
return ((1 << io_sel) & pci_config_info[trgt].cfg);
}
#endif
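/*
 * Illustrative usage (hypothetical caller, not part of this file):
 * platform code reads io_sel from the GUTS PORDEVSR register and then
 * checks, e.g., is_fsl_pci_cfg(LAW_TRGT_IF_PCIE_1, io_sel) to decide
 * whether that PCIe controller is enabled.
 */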